Merge tag 'trace-v5.0-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt...
[linux-2.6-microblaze.git] / arch / x86 / events / intel / uncore_snbep.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* SandyBridge-EP/IvyTown uncore support */
3 #include "uncore.h"
4
5 /* SNB-EP pci bus to socket mapping */
6 #define SNBEP_CPUNODEID                 0x40
7 #define SNBEP_GIDNIDMAP                 0x54
8
9 /* SNB-EP Box level control */
10 #define SNBEP_PMON_BOX_CTL_RST_CTRL     (1 << 0)
11 #define SNBEP_PMON_BOX_CTL_RST_CTRS     (1 << 1)
12 #define SNBEP_PMON_BOX_CTL_FRZ          (1 << 8)
13 #define SNBEP_PMON_BOX_CTL_FRZ_EN       (1 << 16)
14 #define SNBEP_PMON_BOX_CTL_INT          (SNBEP_PMON_BOX_CTL_RST_CTRL | \
15                                          SNBEP_PMON_BOX_CTL_RST_CTRS | \
16                                          SNBEP_PMON_BOX_CTL_FRZ_EN)
17 /* SNB-EP event control */
18 #define SNBEP_PMON_CTL_EV_SEL_MASK      0x000000ff
19 #define SNBEP_PMON_CTL_UMASK_MASK       0x0000ff00
20 #define SNBEP_PMON_CTL_RST              (1 << 17)
21 #define SNBEP_PMON_CTL_EDGE_DET         (1 << 18)
22 #define SNBEP_PMON_CTL_EV_SEL_EXT       (1 << 21)
23 #define SNBEP_PMON_CTL_EN               (1 << 22)
24 #define SNBEP_PMON_CTL_INVERT           (1 << 23)
25 #define SNBEP_PMON_CTL_TRESH_MASK       0xff000000
26 #define SNBEP_PMON_RAW_EVENT_MASK       (SNBEP_PMON_CTL_EV_SEL_MASK | \
27                                          SNBEP_PMON_CTL_UMASK_MASK | \
28                                          SNBEP_PMON_CTL_EDGE_DET | \
29                                          SNBEP_PMON_CTL_INVERT | \
30                                          SNBEP_PMON_CTL_TRESH_MASK)
31
32 /* SNB-EP Ubox event control */
33 #define SNBEP_U_MSR_PMON_CTL_TRESH_MASK         0x1f000000
34 #define SNBEP_U_MSR_PMON_RAW_EVENT_MASK         \
35                                 (SNBEP_PMON_CTL_EV_SEL_MASK | \
36                                  SNBEP_PMON_CTL_UMASK_MASK | \
37                                  SNBEP_PMON_CTL_EDGE_DET | \
38                                  SNBEP_PMON_CTL_INVERT | \
39                                  SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
40
41 #define SNBEP_CBO_PMON_CTL_TID_EN               (1 << 19)
42 #define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK       (SNBEP_PMON_RAW_EVENT_MASK | \
43                                                  SNBEP_CBO_PMON_CTL_TID_EN)
44
45 /* SNB-EP PCU event control */
46 #define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK     0x0000c000
47 #define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK       0x1f000000
48 #define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT       (1 << 30)
49 #define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET     (1 << 31)
50 #define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK       \
51                                 (SNBEP_PMON_CTL_EV_SEL_MASK | \
52                                  SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
53                                  SNBEP_PMON_CTL_EDGE_DET | \
54                                  SNBEP_PMON_CTL_INVERT | \
55                                  SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
56                                  SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
57                                  SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
58
59 #define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK       \
60                                 (SNBEP_PMON_RAW_EVENT_MASK | \
61                                  SNBEP_PMON_CTL_EV_SEL_EXT)
62
63 /* SNB-EP pci control register */
64 #define SNBEP_PCI_PMON_BOX_CTL                  0xf4
65 #define SNBEP_PCI_PMON_CTL0                     0xd8
66 /* SNB-EP pci counter register */
67 #define SNBEP_PCI_PMON_CTR0                     0xa0
68
69 /* SNB-EP home agent register */
70 #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0        0x40
71 #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1        0x44
72 #define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH       0x48
73 /* SNB-EP memory controller register */
74 #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL         0xf0
75 #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR         0xd0
76 /* SNB-EP QPI register */
77 #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0          0x228
78 #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1          0x22c
79 #define SNBEP_Q_Py_PCI_PMON_PKT_MASK0           0x238
80 #define SNBEP_Q_Py_PCI_PMON_PKT_MASK1           0x23c
81
82 /* SNB-EP Ubox register */
83 #define SNBEP_U_MSR_PMON_CTR0                   0xc16
84 #define SNBEP_U_MSR_PMON_CTL0                   0xc10
85
86 #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL         0xc08
87 #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR         0xc09
88
89 /* SNB-EP Cbo register */
90 #define SNBEP_C0_MSR_PMON_CTR0                  0xd16
91 #define SNBEP_C0_MSR_PMON_CTL0                  0xd10
92 #define SNBEP_C0_MSR_PMON_BOX_CTL               0xd04
93 #define SNBEP_C0_MSR_PMON_BOX_FILTER            0xd14
94 #define SNBEP_CBO_MSR_OFFSET                    0x20
95
96 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID       0x1f
97 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID       0x3fc00
98 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE     0x7c0000
99 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC       0xff800000
100
101 #define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {    \
102         .event = (e),                           \
103         .msr = SNBEP_C0_MSR_PMON_BOX_FILTER,    \
104         .config_mask = (m),                     \
105         .idx = (i)                              \
106 }
107
108 /* SNB-EP PCU register */
109 #define SNBEP_PCU_MSR_PMON_CTR0                 0xc36
110 #define SNBEP_PCU_MSR_PMON_CTL0                 0xc30
111 #define SNBEP_PCU_MSR_PMON_BOX_CTL              0xc24
112 #define SNBEP_PCU_MSR_PMON_BOX_FILTER           0xc34
113 #define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK      0xffffffff
114 #define SNBEP_PCU_MSR_CORE_C3_CTR               0x3fc
115 #define SNBEP_PCU_MSR_CORE_C6_CTR               0x3fd
116
117 /* IVBEP event control */
118 #define IVBEP_PMON_BOX_CTL_INT          (SNBEP_PMON_BOX_CTL_RST_CTRL | \
119                                          SNBEP_PMON_BOX_CTL_RST_CTRS)
120 #define IVBEP_PMON_RAW_EVENT_MASK               (SNBEP_PMON_CTL_EV_SEL_MASK | \
121                                          SNBEP_PMON_CTL_UMASK_MASK | \
122                                          SNBEP_PMON_CTL_EDGE_DET | \
123                                          SNBEP_PMON_CTL_TRESH_MASK)
124 /* IVBEP Ubox */
125 #define IVBEP_U_MSR_PMON_GLOBAL_CTL             0xc00
126 #define IVBEP_U_PMON_GLOBAL_FRZ_ALL             (1 << 31)
127 #define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL           (1 << 29)
128
129 #define IVBEP_U_MSR_PMON_RAW_EVENT_MASK \
130                                 (SNBEP_PMON_CTL_EV_SEL_MASK | \
131                                  SNBEP_PMON_CTL_UMASK_MASK | \
132                                  SNBEP_PMON_CTL_EDGE_DET | \
133                                  SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
134 /* IVBEP Cbo */
135 #define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK               (IVBEP_PMON_RAW_EVENT_MASK | \
136                                                  SNBEP_CBO_PMON_CTL_TID_EN)
137
138 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID               (0x1fULL << 0)
139 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK      (0xfULL << 5)
140 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE     (0x3fULL << 17)
141 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID               (0xffffULL << 32)
142 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC               (0x1ffULL << 52)
143 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6                (0x1ULL << 61)
144 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC                (0x1ULL << 62)
145 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC      (0x1ULL << 63)
146
147 /* IVBEP home agent */
148 #define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST         (1 << 16)
149 #define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK                \
150                                 (IVBEP_PMON_RAW_EVENT_MASK | \
151                                  IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
152 /* IVBEP PCU */
153 #define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK       \
154                                 (SNBEP_PMON_CTL_EV_SEL_MASK | \
155                                  SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
156                                  SNBEP_PMON_CTL_EDGE_DET | \
157                                  SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
158                                  SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
159                                  SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
160 /* IVBEP QPI */
161 #define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK       \
162                                 (IVBEP_PMON_RAW_EVENT_MASK | \
163                                  SNBEP_PMON_CTL_EV_SEL_EXT)
164
165 #define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
166                                 ((1ULL << (n)) - 1)))
167
168 /* Haswell-EP Ubox */
169 #define HSWEP_U_MSR_PMON_CTR0                   0x709
170 #define HSWEP_U_MSR_PMON_CTL0                   0x705
171 #define HSWEP_U_MSR_PMON_FILTER                 0x707
172
173 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL         0x703
174 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR         0x704
175
176 #define HSWEP_U_MSR_PMON_BOX_FILTER_TID         (0x1 << 0)
177 #define HSWEP_U_MSR_PMON_BOX_FILTER_CID         (0x1fULL << 1)
178 #define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
179                                         (HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
180                                          HSWEP_U_MSR_PMON_BOX_FILTER_CID)
181
182 /* Haswell-EP CBo */
183 #define HSWEP_C0_MSR_PMON_CTR0                  0xe08
184 #define HSWEP_C0_MSR_PMON_CTL0                  0xe01
185 #define HSWEP_C0_MSR_PMON_BOX_CTL                       0xe00
186 #define HSWEP_C0_MSR_PMON_BOX_FILTER0           0xe05
187 #define HSWEP_CBO_MSR_OFFSET                    0x10
188
189
190 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID               (0x3fULL << 0)
191 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK      (0xfULL << 6)
192 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE     (0x7fULL << 17)
193 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID               (0xffffULL << 32)
194 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC               (0x1ffULL << 52)
195 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6                (0x1ULL << 61)
196 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC                (0x1ULL << 62)
197 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC      (0x1ULL << 63)
198
199
200 /* Haswell-EP Sbox */
201 #define HSWEP_S0_MSR_PMON_CTR0                  0x726
202 #define HSWEP_S0_MSR_PMON_CTL0                  0x721
203 #define HSWEP_S0_MSR_PMON_BOX_CTL                       0x720
204 #define HSWEP_SBOX_MSR_OFFSET                   0xa
205 #define HSWEP_S_MSR_PMON_RAW_EVENT_MASK         (SNBEP_PMON_RAW_EVENT_MASK | \
206                                                  SNBEP_CBO_PMON_CTL_TID_EN)
207
208 /* Haswell-EP PCU */
209 #define HSWEP_PCU_MSR_PMON_CTR0                 0x717
210 #define HSWEP_PCU_MSR_PMON_CTL0                 0x711
211 #define HSWEP_PCU_MSR_PMON_BOX_CTL              0x710
212 #define HSWEP_PCU_MSR_PMON_BOX_FILTER           0x715
213
214 /* KNL Ubox */
215 #define KNL_U_MSR_PMON_RAW_EVENT_MASK \
216                                         (SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
217                                                 SNBEP_CBO_PMON_CTL_TID_EN)
218 /* KNL CHA */
219 #define KNL_CHA_MSR_OFFSET                      0xc
220 #define KNL_CHA_MSR_PMON_CTL_QOR                (1 << 16)
221 #define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
222                                         (SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
223                                          KNL_CHA_MSR_PMON_CTL_QOR)
224 #define KNL_CHA_MSR_PMON_BOX_FILTER_TID         0x1ff
225 #define KNL_CHA_MSR_PMON_BOX_FILTER_STATE       (7 << 18)
226 #define KNL_CHA_MSR_PMON_BOX_FILTER_OP          (0xfffffe2aULL << 32)
227 #define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE (0x1ULL << 32)
228 #define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE  (0x1ULL << 33)
229 #define KNL_CHA_MSR_PMON_BOX_FILTER_NNC         (0x1ULL << 37)
230
231 /* KNL EDC/MC UCLK */
232 #define KNL_UCLK_MSR_PMON_CTR0_LOW              0x400
233 #define KNL_UCLK_MSR_PMON_CTL0                  0x420
234 #define KNL_UCLK_MSR_PMON_BOX_CTL               0x430
235 #define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW        0x44c
236 #define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL        0x454
237 #define KNL_PMON_FIXED_CTL_EN                   0x1
238
239 /* KNL EDC */
240 #define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW         0xa00
241 #define KNL_EDC0_ECLK_MSR_PMON_CTL0             0xa20
242 #define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL          0xa30
243 #define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW   0xa3c
244 #define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL   0xa44
245
246 /* KNL MC */
247 #define KNL_MC0_CH0_MSR_PMON_CTR0_LOW           0xb00
248 #define KNL_MC0_CH0_MSR_PMON_CTL0               0xb20
249 #define KNL_MC0_CH0_MSR_PMON_BOX_CTL            0xb30
250 #define KNL_MC0_CH0_MSR_PMON_FIXED_LOW          0xb3c
251 #define KNL_MC0_CH0_MSR_PMON_FIXED_CTL          0xb44
252
253 /* KNL IRP */
254 #define KNL_IRP_PCI_PMON_BOX_CTL                0xf0
255 #define KNL_IRP_PCI_PMON_RAW_EVENT_MASK         (SNBEP_PMON_RAW_EVENT_MASK | \
256                                                  KNL_CHA_MSR_PMON_CTL_QOR)
257 /* KNL PCU */
258 #define KNL_PCU_PMON_CTL_EV_SEL_MASK            0x0000007f
259 #define KNL_PCU_PMON_CTL_USE_OCC_CTR            (1 << 7)
260 #define KNL_PCU_MSR_PMON_CTL_TRESH_MASK         0x3f000000
261 #define KNL_PCU_MSR_PMON_RAW_EVENT_MASK \
262                                 (KNL_PCU_PMON_CTL_EV_SEL_MASK | \
263                                  KNL_PCU_PMON_CTL_USE_OCC_CTR | \
264                                  SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
265                                  SNBEP_PMON_CTL_EDGE_DET | \
266                                  SNBEP_CBO_PMON_CTL_TID_EN | \
267                                  SNBEP_PMON_CTL_INVERT | \
268                                  KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
269                                  SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
270                                  SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
271
272 /* SKX pci bus to socket mapping */
273 #define SKX_CPUNODEID                   0xc0
274 #define SKX_GIDNIDMAP                   0xd4
275
276 /* SKX CHA */
277 #define SKX_CHA_MSR_PMON_BOX_FILTER_TID         (0x1ffULL << 0)
278 #define SKX_CHA_MSR_PMON_BOX_FILTER_LINK        (0xfULL << 9)
279 #define SKX_CHA_MSR_PMON_BOX_FILTER_STATE       (0x3ffULL << 17)
280 #define SKX_CHA_MSR_PMON_BOX_FILTER_REM         (0x1ULL << 32)
281 #define SKX_CHA_MSR_PMON_BOX_FILTER_LOC         (0x1ULL << 33)
282 #define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC     (0x1ULL << 35)
283 #define SKX_CHA_MSR_PMON_BOX_FILTER_NM          (0x1ULL << 36)
284 #define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM      (0x1ULL << 37)
285 #define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0        (0x3ffULL << 41)
286 #define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1        (0x3ffULL << 51)
287 #define SKX_CHA_MSR_PMON_BOX_FILTER_C6          (0x1ULL << 61)
288 #define SKX_CHA_MSR_PMON_BOX_FILTER_NC          (0x1ULL << 62)
289 #define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC        (0x1ULL << 63)
290
291 /* SKX IIO */
292 #define SKX_IIO0_MSR_PMON_CTL0          0xa48
293 #define SKX_IIO0_MSR_PMON_CTR0          0xa41
294 #define SKX_IIO0_MSR_PMON_BOX_CTL       0xa40
295 #define SKX_IIO_MSR_OFFSET              0x20
296
297 #define SKX_PMON_CTL_TRESH_MASK         (0xff << 24)
298 #define SKX_PMON_CTL_TRESH_MASK_EXT     (0xf)
299 #define SKX_PMON_CTL_CH_MASK            (0xff << 4)
300 #define SKX_PMON_CTL_FC_MASK            (0x7 << 12)
301 #define SKX_IIO_PMON_RAW_EVENT_MASK     (SNBEP_PMON_CTL_EV_SEL_MASK | \
302                                          SNBEP_PMON_CTL_UMASK_MASK | \
303                                          SNBEP_PMON_CTL_EDGE_DET | \
304                                          SNBEP_PMON_CTL_INVERT | \
305                                          SKX_PMON_CTL_TRESH_MASK)
306 #define SKX_IIO_PMON_RAW_EVENT_MASK_EXT (SKX_PMON_CTL_TRESH_MASK_EXT | \
307                                          SKX_PMON_CTL_CH_MASK | \
308                                          SKX_PMON_CTL_FC_MASK)
309
310 /* SKX IRP */
311 #define SKX_IRP0_MSR_PMON_CTL0          0xa5b
312 #define SKX_IRP0_MSR_PMON_CTR0          0xa59
313 #define SKX_IRP0_MSR_PMON_BOX_CTL       0xa58
314 #define SKX_IRP_MSR_OFFSET              0x20
315
316 /* SKX UPI */
317 #define SKX_UPI_PCI_PMON_CTL0           0x350
318 #define SKX_UPI_PCI_PMON_CTR0           0x318
319 #define SKX_UPI_PCI_PMON_BOX_CTL        0x378
320 #define SKX_UPI_CTL_UMASK_EXT           0xffefff
321
322 /* SKX M2M */
323 #define SKX_M2M_PCI_PMON_CTL0           0x228
324 #define SKX_M2M_PCI_PMON_CTR0           0x200
325 #define SKX_M2M_PCI_PMON_BOX_CTL        0x258
326
/*
 * sysfs "format" attribute definitions. Each entry maps a user-visible
 * field name to a bit range in the event's config/config1/config2 word;
 * numbered variants (thresh5/thresh8, filter_tid2, ...) are the same
 * logical field at the bit positions a particular uarch uses.
 */
/* event select, umask and control bits (config) */
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
/* box filter fields (config1) */
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
/* QPI packet match fields (config1) */
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
/* QPI packet mask fields (config2) */
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
399
400 static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
401 {
402         struct pci_dev *pdev = box->pci_dev;
403         int box_ctl = uncore_pci_box_ctl(box);
404         u32 config = 0;
405
406         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
407                 config |= SNBEP_PMON_BOX_CTL_FRZ;
408                 pci_write_config_dword(pdev, box_ctl, config);
409         }
410 }
411
412 static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
413 {
414         struct pci_dev *pdev = box->pci_dev;
415         int box_ctl = uncore_pci_box_ctl(box);
416         u32 config = 0;
417
418         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
419                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
420                 pci_write_config_dword(pdev, box_ctl, config);
421         }
422 }
423
424 static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
425 {
426         struct pci_dev *pdev = box->pci_dev;
427         struct hw_perf_event *hwc = &event->hw;
428
429         pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
430 }
431
432 static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
433 {
434         struct pci_dev *pdev = box->pci_dev;
435         struct hw_perf_event *hwc = &event->hw;
436
437         pci_write_config_dword(pdev, hwc->config_base, hwc->config);
438 }
439
440 static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
441 {
442         struct pci_dev *pdev = box->pci_dev;
443         struct hw_perf_event *hwc = &event->hw;
444         u64 count = 0;
445
446         pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
447         pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
448
449         return count;
450 }
451
452 static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
453 {
454         struct pci_dev *pdev = box->pci_dev;
455         int box_ctl = uncore_pci_box_ctl(box);
456
457         pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
458 }
459
460 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
461 {
462         u64 config;
463         unsigned msr;
464
465         msr = uncore_msr_box_ctl(box);
466         if (msr) {
467                 rdmsrl(msr, config);
468                 config |= SNBEP_PMON_BOX_CTL_FRZ;
469                 wrmsrl(msr, config);
470         }
471 }
472
473 static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
474 {
475         u64 config;
476         unsigned msr;
477
478         msr = uncore_msr_box_ctl(box);
479         if (msr) {
480                 rdmsrl(msr, config);
481                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
482                 wrmsrl(msr, config);
483         }
484 }
485
/*
 * Enable an MSR-based uncore event: program the box's shared filter
 * register first (if this event uses one), then write the event control
 * MSR with the enable bit set.
 *
 * NOTE(review): index 0 is passed to uncore_shared_reg_config()
 * unconditionally rather than reg1->idx — presumably SNB-EP boxes have
 * a single shared filter register; confirm against uncore.h.
 */
static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
496
497 static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
498                                         struct perf_event *event)
499 {
500         struct hw_perf_event *hwc = &event->hw;
501
502         wrmsrl(hwc->config_base, hwc->config);
503 }
504
505 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
506 {
507         unsigned msr = uncore_msr_box_ctl(box);
508
509         if (msr)
510                 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
511 }
512
/* Generic SNB-EP PMON format: 8-bit threshold boxes. */
static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
521
/* Ubox format: same as generic but with a 5-bit threshold field. */
static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};
530
/* Cbox format: adds tid_en plus the box filter fields (tid/nid/state/opc). */
static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};
544
/* PCU format: occupancy select/invert/edge plus the four band filters. */
static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};
559
/* QPI format: extended event select plus packet match/mask fields. */
static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
586
/*
 * Predefined IMC events. The .scale entries convert CAS counts to MiB:
 * 6.103515625e-5 = 64 (bytes per cache line) / 2^20.
 */
static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
597
/* Predefined QPI events (0x1xx event codes use the ev_sel_ext bit). */
static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};
605
/*
 * sysfs "format" groups: each wraps one of the attribute lists above and
 * is exported under the PMU's format/ directory.
 */
static const struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static const struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static const struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static const struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static const struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};
630
/*
 * Common callbacks for MSR-based PMON boxes. The __ variant omits
 * .init_box so later uarch variants can plug in their own init hook.
 */
#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),			\
	.init_box	= snbep_uncore_msr_init_box		\

static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};
645
646 #define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()                      \
647         .init_box       = snbep_uncore_pci_init_box,            \
648         .disable_box    = snbep_uncore_pci_disable_box,         \
649         .enable_box     = snbep_uncore_pci_enable_box,          \
650         .disable_event  = snbep_uncore_pci_disable_event,       \
651         .read_counter   = snbep_uncore_pci_read_counter
652
653 static struct intel_uncore_ops snbep_uncore_pci_ops = {
654         SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
655         .enable_event   = snbep_uncore_pci_enable_event,        \
656 };
657
/*
 * Cbox counter constraints: maps an event code to the bitmask of
 * counters it may be scheduled on.
 */
static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};
687
/* R2PCIe counter constraints: (event select, usable counter mask). */
static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};
701
/* R3QPI counter constraints: (event select, usable counter mask). */
static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
733
/*
 * UBox: 2 general-purpose 44-bit counters plus a 48-bit fixed counter
 * (UCLK_FIXED register names suggest it counts uncore clocks).
 */
static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};
748
/*
 * C-Box events that require the box filter register:
 * (event+umask to match, config_mask for the match, bit mask of filter
 * fields consumed -- OR-ed together per event by snbep_cbox_hw_config()).
 */
static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};
777
778 static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
779 {
780         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
781         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
782         int i;
783
784         if (uncore_box_is_fake(box))
785                 return;
786
787         for (i = 0; i < 5; i++) {
788                 if (reg1->alloc & (0x1 << i))
789                         atomic_sub(1 << (i * 6), &er->ref);
790         }
791         reg1->alloc = 0;
792 }
793
/*
 * Try to grab the shared C-Box filter register for every filter field
 * this event needs (reg1->idx is a bit mask of fields).  A field can be
 * shared when it is unused or already programmed to the same value.
 * Returns NULL on success, or the empty constraint if any field is held
 * with a conflicting value.  Pairs with snbep_cbox_put_constraint().
 */
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		/* Real boxes keep references across calls; skip fields already held. */
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		/*
		 * Field is free (6-bit refcount slice is zero) or already
		 * carries exactly the value we want -> take a reference.
		 */
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	/* i < 5 means some field conflicted: roll back what we took. */
	if (i < 5)
		goto fail;

	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}
840
841 static u64 snbep_cbox_filter_mask(int fields)
842 {
843         u64 mask = 0;
844
845         if (fields & 0x1)
846                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
847         if (fields & 0x2)
848                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
849         if (fields & 0x4)
850                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
851         if (fields & 0x8)
852                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
853
854         return mask;
855 }
856
/* C-Box constraint lookup using the SNB-EP filter-field bit layout. */
static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}
862
863 static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
864 {
865         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
866         struct extra_reg *er;
867         int idx = 0;
868
869         for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
870                 if (er->event != (event->hw.config & er->config_mask))
871                         continue;
872                 idx |= er->idx;
873         }
874
875         if (idx) {
876                 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
877                         SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
878                 reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
879                 reg1->idx = idx;
880         }
881         return 0;
882 }
883
/* C-Box ops: common MSR ops plus filter-register constraint handling. */
static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
890
/*
 * C-Box (LLC coherency engine): up to 8 boxes (clamped to the core count
 * in snbep_uncore_cpu_init()), one shared filter register per box.
 */
static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};
906
907 static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
908 {
909         struct hw_perf_event *hwc = &event->hw;
910         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
911         u64 config = reg1->config;
912
913         if (new_idx > reg1->idx)
914                 config <<= 8 * (new_idx - reg1->idx);
915         else
916                 config >>= 8 * (reg1->idx - new_idx);
917
918         if (modify) {
919                 hwc->config += new_idx - reg1->idx;
920                 reg1->config = config;
921                 reg1->idx = new_idx;
922         }
923         return config;
924 }
925
/*
 * Claim one 8-bit lane of the shared PCU filter register.  If the lane
 * the event asked for is busy with a different value, rotate through the
 * other three lanes (relocating the filter value with
 * snbep_pcu_alter_er()) before giving up with the empty constraint.
 */
static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	/* No filter needed, or a real box already holds its reference. */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	/* Lane free (8-bit refcount slice zero) or holding the same value. */
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		/* Commit the lane change and remember we hold a reference. */
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}
967
968 static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
969 {
970         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
971         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
972
973         if (uncore_box_is_fake(box) || !reg1->alloc)
974                 return;
975
976         atomic_sub(1 << (reg1->idx * 8), &er->ref);
977         reg1->alloc = 0;
978 }
979
980 static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
981 {
982         struct hw_perf_event *hwc = &event->hw;
983         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
984         int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
985
986         if (ev_sel >= 0xb && ev_sel <= 0xe) {
987                 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
988                 reg1->idx = ev_sel - 0xb;
989                 reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
990         }
991         return 0;
992 }
993
/* PCU ops: common MSR ops plus filter-band constraint handling. */
static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
1000
/* PCU (power control unit): one box, shared filter register. */
static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};
1014
/* NULL-terminated list of SNB-EP MSR-based uncore PMU types. */
static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};
1021
1022 void snbep_uncore_cpu_init(void)
1023 {
1024         if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1025                 snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1026         uncore_msr_uncores = snbep_msr_uncores;
1027 }
1028
/* Indices into uncore_extra_pci_dev[pkg].dev[] for companion PCI devices. */
enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	BDX_PCI_QPI_PORT2_FILTER,
	HSWEP_PCI_PCU_3,
};
1035
1036 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1037 {
1038         struct hw_perf_event *hwc = &event->hw;
1039         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1040         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1041
1042         if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
1043                 reg1->idx = 0;
1044                 reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
1045                 reg1->config = event->attr.config1;
1046                 reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
1047                 reg2->config = event->attr.config2;
1048         }
1049         return 0;
1050 }
1051
1052 static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1053 {
1054         struct pci_dev *pdev = box->pci_dev;
1055         struct hw_perf_event *hwc = &event->hw;
1056         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1057         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1058
1059         if (reg1->idx != EXTRA_REG_NONE) {
1060                 int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
1061                 int pkg = box->pkgid;
1062                 struct pci_dev *filter_pdev = uncore_extra_pci_dev[pkg].dev[idx];
1063
1064                 if (filter_pdev) {
1065                         pci_write_config_dword(filter_pdev, reg1->reg,
1066                                                 (u32)reg1->config);
1067                         pci_write_config_dword(filter_pdev, reg1->reg + 4,
1068                                                 (u32)(reg1->config >> 32));
1069                         pci_write_config_dword(filter_pdev, reg2->reg,
1070                                                 (u32)reg2->config);
1071                         pci_write_config_dword(filter_pdev, reg2->reg + 4,
1072                                                 (u32)(reg2->config >> 32));
1073                 }
1074         }
1075
1076         pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1077 }
1078
/* QPI ops: common PCI ops with a filter-aware enable hook. */
static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};
1086
/* Register layout shared by all standard SNB-EP PCI uncore boxes. */
#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group
1094
/* HA (home agent). */
static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1102
/* IMC (memory controller): 4 channels, each with a fixed (DCLK) counter. */
static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1114
/* QPI link layer: two ports, custom ops for the packet match/mask filters. */
static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};
1129
1130
/* R2PCIe (ring-to-PCIe interface). */
static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1139
/* R3QPI (ring-to-QPI interface): two links, only 3 usable counters. */
static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1148
/* Indices into snbep_pci_uncores[], referenced by the PCI id table. */
enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};
1156
/* NULL-terminated list of SNB-EP PCI-based uncore PMU types. */
static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};
1165
/*
 * PCI ids of the SNB-EP uncore devices; driver_data encodes the
 * (uncore type index, box/extra-dev index) pair.
 */
static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
1219
/* Skeleton PCI driver; the uncore core fills in the probe/remove hooks. */
static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};
1224
/* The local node id occupies the low 3 bits of the CPUNODEID register. */
#define NODE_ID_MASK	0x7

/*
 * Build the PCI-bus-number -> physical socket id mapping.
 *
 * Walks every UBOX device matching @devid (one per socket -- TODO
 * confirm against the platform docs), reads its local node id at config
 * offset @nodeid_loc and the node-id-to-socket mapping register at
 * @idmap_loc, and records the socket for that device's bus.  Buses
 * without a UBOX then inherit the mapping of a neighbouring bus,
 * scanning downwards when @reverse is set, upwards otherwise.
 * Returns 0 on success or a negative errno.
 */
static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid, segment;
	struct pci2phy_map *map;
	int err = 0;
	u32 config = 0;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/* get the Node ID of the local register */
		err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
		if (err)
			break;
		nodeid = config & NODE_ID_MASK;
		/* get the Node ID mapping */
		err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
		if (err)
			break;

		segment = pci_domain_nr(ubox_dev->bus);
		raw_spin_lock(&pci2phy_map_lock);
		map = __find_pci2phy_map(segment);
		if (!map) {
			raw_spin_unlock(&pci2phy_map_lock);
			err = -ENOMEM;
			break;
		}

		/*
		 * every three bits in the Node ID mapping register maps
		 * to a particular node.
		 */
		for (i = 0; i < 8; i++) {
			if (nodeid == ((config >> (3 * i)) & 0x7)) {
				map->pbus_to_physid[bus] = i;
				break;
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	if (!err) {
		/*
		 * For PCI bus with no UBOX device, find the next bus
		 * that has UBOX device and use its mapping.
		 */
		raw_spin_lock(&pci2phy_map_lock);
		list_for_each_entry(map, &pci2phy_map_head, list) {
			i = -1;	/* last socket id seen; -1 = none yet */
			if (reverse) {
				for (bus = 255; bus >= 0; bus--) {
					if (map->pbus_to_physid[bus] >= 0)
						i = map->pbus_to_physid[bus];
					else
						map->pbus_to_physid[bus] = i;
				}
			} else {
				for (bus = 0; bus <= 255; bus++) {
					if (map->pbus_to_physid[bus] >= 0)
						i = map->pbus_to_physid[bus];
					else
						map->pbus_to_physid[bus] = i;
				}
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	/* Non-NULL only when the scan loop bailed out early on error. */
	pci_dev_put(ubox_dev);

	return err ? pcibios_err_to_errno(err) : 0;
}
1307
1308 int snbep_uncore_pci_init(void)
1309 {
1310         int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1311         if (ret)
1312                 return ret;
1313         uncore_pci_uncores = snbep_pci_uncores;
1314         uncore_pci_driver = &snbep_uncore_pci_driver;
1315         return 0;
1316 }
1317 /* end of Sandy Bridge-EP uncore support */
1318
1319 /* IvyTown uncore support */
1320 static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
1321 {
1322         unsigned msr = uncore_msr_box_ctl(box);
1323         if (msr)
1324                 wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
1325 }
1326
1327 static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
1328 {
1329         struct pci_dev *pdev = box->pci_dev;
1330
1331         pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
1332 }
1333
/* IVB-EP MSR ops: SNB-EP handlers with the IVB-EP init (reset) hook. */
#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= ivbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter
1341
/* Default ops for IVB-EP MSR-based uncore boxes. */
static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};
1345
/* Default ops for IVB-EP PCI-based uncore boxes. */
static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
1354
/* Register layout shared by the standard IVB-EP PCI uncore boxes. */
#define IVBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,			\
	.format_group	= &ivbep_uncore_format_group
1362
/* Generic IVB-EP event format: 8-bit threshold. */
static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
1371
/* UBox format: only a 5-bit threshold. */
static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};
1380
/* C-Box format: adds the IVB-EP filter fields (tid/link/state/nid/opc/...). */
static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};
1397
/* PCU format: occupancy select/edge/invert plus the four filter bands. */
static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};
1411
/* QPI format: extended event select plus the packet match/mask fields. */
static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
1437
1438 static const struct attribute_group ivbep_uncore_format_group = {
1439         .name = "format",
1440         .attrs = ivbep_uncore_formats_attr,
1441 };
1442
1443 static const struct attribute_group ivbep_uncore_ubox_format_group = {
1444         .name = "format",
1445         .attrs = ivbep_uncore_ubox_formats_attr,
1446 };
1447
1448 static const struct attribute_group ivbep_uncore_cbox_format_group = {
1449         .name = "format",
1450         .attrs = ivbep_uncore_cbox_formats_attr,
1451 };
1452
1453 static const struct attribute_group ivbep_uncore_pcu_format_group = {
1454         .name = "format",
1455         .attrs = ivbep_uncore_pcu_formats_attr,
1456 };
1457
1458 static const struct attribute_group ivbep_uncore_qpi_format_group = {
1459         .name = "format",
1460         .attrs = ivbep_uncore_qpi_formats_attr,
1461 };
1462
/* IVT U-box: one box, 2 general 44-bit counters plus a 48-bit fixed counter */
static struct intel_uncore_type ivbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_ubox_format_group,
};

/*
 * C-box events that require the box filter register.  The third argument is
 * a bitmask of filter fields, accumulated by ivbep_cbox_hw_config() and
 * decoded by ivbep_cbox_filter_mask().
 */
static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	EVENT_EXTRA_END
};
1518
1519 static u64 ivbep_cbox_filter_mask(int fields)
1520 {
1521         u64 mask = 0;
1522
1523         if (fields & 0x1)
1524                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1525         if (fields & 0x2)
1526                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1527         if (fields & 0x4)
1528                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1529         if (fields & 0x8)
1530                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1531         if (fields & 0x10) {
1532                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1533                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1534                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1535                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1536         }
1537
1538         return mask;
1539 }
1540
/* Shared-filter constraint handling, parameterized with the IVT filter layout. */
static struct event_constraint *
ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
}
1546
1547 static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1548 {
1549         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1550         struct extra_reg *er;
1551         int idx = 0;
1552
1553         for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
1554                 if (er->event != (event->hw.config & er->config_mask))
1555                         continue;
1556                 idx |= er->idx;
1557         }
1558
1559         if (idx) {
1560                 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1561                         SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1562                 reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
1563                 reg1->idx = idx;
1564         }
1565         return 0;
1566 }
1567
/*
 * Program the shared filter register (if this event uses one) and then
 * enable the event's control register.  The filter must be written before
 * the enable bit is set so the event never counts with a stale filter.
 */
static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		/*
		 * The 64-bit filter value is split over two MSRs; the upper
		 * half lives at reg + 6 (IVT filter MSR pair layout).
		 */
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 6, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1581
/* C-box callbacks: SNB-EP MSR flow plus IVT-specific filter programming. */
static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= ivbep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= ivbep_cbox_hw_config,
	.get_constraint		= ivbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/* IVT C-box: up to 15 boxes (clamped at init), one shared filter reg each. */
static struct intel_uncore_type ivbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 15,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &ivbep_uncore_cbox_ops,
	.format_group		= &ivbep_uncore_cbox_format_group,
};

/* PCU callbacks: common MSR ops plus SNB-EP PCU filter/constraint handling. */
static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* IVT power control unit: single box with one shared (filter) register. */
static struct intel_uncore_type ivbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_pcu_ops,
	.format_group		= &ivbep_uncore_pcu_format_group,
};

/* NULL-terminated list of MSR-based IVT uncore PMU types. */
static struct intel_uncore_type *ivbep_msr_uncores[] = {
	&ivbep_uncore_ubox,
	&ivbep_uncore_cbox,
	&ivbep_uncore_pcu,
	NULL,
};
1637
1638 void ivbep_uncore_cpu_init(void)
1639 {
1640         if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1641                 ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1642         uncore_msr_uncores = ivbep_msr_uncores;
1643 }
1644
/* IVT home agent: two PCI-based boxes with the common IVT PCI PMON layout. */
static struct intel_uncore_type ivbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* IVT memory controller: 8 channel boxes, each with a fixed (DCLK) counter. */
static struct intel_uncore_type ivbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* registers in IRP boxes are not properly aligned */
/* per-counter PCI config offsets of the IRP control and counter registers */
static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1668
1669 static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1670 {
1671         struct pci_dev *pdev = box->pci_dev;
1672         struct hw_perf_event *hwc = &event->hw;
1673
1674         pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
1675                                hwc->config | SNBEP_PMON_CTL_EN);
1676 }
1677
1678 static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1679 {
1680         struct pci_dev *pdev = box->pci_dev;
1681         struct hw_perf_event *hwc = &event->hw;
1682
1683         pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
1684 }
1685
1686 static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1687 {
1688         struct pci_dev *pdev = box->pci_dev;
1689         struct hw_perf_event *hwc = &event->hw;
1690         u64 count = 0;
1691
1692         pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1693         pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1694
1695         return count;
1696 }
1697
/* IRP callbacks: common PCI box control, IRP-specific event/counter access. */
static struct intel_uncore_ops ivbep_uncore_irp_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= ivbep_uncore_irp_read_counter,
};

/* IVT IRP: no perf_ctr/event_ctl bases; offsets come from the lookup tables. */
static struct intel_uncore_type ivbep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= IVBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &ivbep_uncore_irp_ops,
	.format_group		= &ivbep_uncore_format_group,
};

/* QPI callbacks: SNB-EP PCI flow plus QPI match/mask register handling. */
static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_qpi_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
	.hw_config	= snbep_qpi_hw_config,
	.get_constraint	= uncore_get_constraint,
	.put_constraint	= uncore_put_constraint,
};

/* IVT QPI link layer: 3 port boxes, one shared (match/mask) register set. */
static struct intel_uncore_type ivbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_qpi_ops,
	.format_group		= &ivbep_uncore_qpi_format_group,
};
1743
/* IVT R2PCIe ring-to-PCIe agent: one box, constrained counter assignment. */
static struct intel_uncore_type ivbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* IVT R3QPI ring-to-QPI agent: two boxes, 3 counters each. */
static struct intel_uncore_type ivbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* Indices into ivbep_pci_uncores[]; also encoded into PCI id driver_data. */
enum {
	IVBEP_PCI_UNCORE_HA,
	IVBEP_PCI_UNCORE_IMC,
	IVBEP_PCI_UNCORE_IRP,
	IVBEP_PCI_UNCORE_QPI,
	IVBEP_PCI_UNCORE_R2PCIE,
	IVBEP_PCI_UNCORE_R3QPI,
};

/* NULL-terminated list of PCI-based IVT uncore PMU types. */
static struct intel_uncore_type *ivbep_pci_uncores[] = {
	[IVBEP_PCI_UNCORE_HA]	= &ivbep_uncore_ha,
	[IVBEP_PCI_UNCORE_IMC]	= &ivbep_uncore_imc,
	[IVBEP_PCI_UNCORE_IRP]	= &ivbep_uncore_irp,
	[IVBEP_PCI_UNCORE_QPI]	= &ivbep_uncore_qpi,
	[IVBEP_PCI_UNCORE_R2PCIE]	= &ivbep_uncore_r2pcie,
	[IVBEP_PCI_UNCORE_R3QPI]	= &ivbep_uncore_r3qpi,
	NULL,
};
1780
/*
 * PCI device ids of the IVT uncore PMON devices; driver_data packs the
 * uncore type index and box (or extra-device) index.
 */
static const struct pci_device_id ivbep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
1866
/* Id table only; the shared uncore core supplies probe/remove handling. */
static struct pci_driver ivbep_uncore_pci_driver = {
	.name		= "ivbep_uncore",
	.id_table	= ivbep_uncore_pci_ids,
};
1871
1872 int ivbep_uncore_pci_init(void)
1873 {
1874         int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1875         if (ret)
1876                 return ret;
1877         uncore_pci_uncores = ivbep_pci_uncores;
1878         uncore_pci_driver = &ivbep_uncore_pci_driver;
1879         return 0;
1880 }
1881 /* end of IvyTown uncore support */
1882
1883 /* KNL uncore support */
/* Sysfs "format" fields for the KNL U-box PMON. */
static struct attribute *knl_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static const struct attribute_group knl_uncore_ubox_format_group = {
	.name = "format",
	.attrs = knl_uncore_ubox_formats_attr,
};

/* KNL U-box: reuses the HSW-EP MSR layout with a KNL-specific event mask. */
static struct intel_uncore_type knl_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= KNL_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_ubox_format_group,
};
1913
/* Sysfs "format" fields for the KNL CHA PMON: event encoding + CHA filters. */
static struct attribute *knl_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_link3.attr,
	&format_attr_filter_state4.attr,
	&format_attr_filter_local.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_nnm.attr,
	&format_attr_filter_opc3.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group knl_uncore_cha_format_group = {
	.name = "format",
	.attrs = knl_uncore_cha_formats_attr,
};

/* Events 0x11, 0x1f and 0x36 are restricted to counter 0. */
static struct event_constraint knl_uncore_cha_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};

/*
 * CHA events needing the filter register; the third argument is the
 * filter-field bitmask consumed by knl_cha_filter_mask().
 */
static struct extra_reg knl_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
	EVENT_EXTRA_END
};
1954
1955 static u64 knl_cha_filter_mask(int fields)
1956 {
1957         u64 mask = 0;
1958
1959         if (fields & 0x1)
1960                 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
1961         if (fields & 0x2)
1962                 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
1963         if (fields & 0x4)
1964                 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
1965         return mask;
1966 }
1967
/* Shared-filter constraint handling, parameterized with the KNL filter layout. */
static struct event_constraint *
knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
}
1973
1974 static int knl_cha_hw_config(struct intel_uncore_box *box,
1975                              struct perf_event *event)
1976 {
1977         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1978         struct extra_reg *er;
1979         int idx = 0;
1980
1981         for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
1982                 if (er->event != (event->hw.config & er->config_mask))
1983                         continue;
1984                 idx |= er->idx;
1985         }
1986
1987         if (idx) {
1988                 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
1989                             KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
1990                 reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);
1991
1992                 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
1993                 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
1994                 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
1995                 reg1->idx = idx;
1996         }
1997         return 0;
1998 }
1999
2000 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2001                                     struct perf_event *event);
2002
/* CHA callbacks: SNB-EP MSR flow with KNL filter config and HSW-EP enable. */
static struct intel_uncore_ops knl_uncore_cha_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= knl_cha_hw_config,
	.get_constraint		= knl_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/* KNL caching/home agent: 38 boxes sharing the HSW-EP C-box register base. */
static struct intel_uncore_type knl_uncore_cha = {
	.name			= "cha",
	.num_counters		= 4,
	.num_boxes		= 38,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= KNL_CHA_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= knl_uncore_cha_constraints,
	.ops			= &knl_uncore_cha_ops,
	.format_group		= &knl_uncore_cha_format_group,
};
2030
/* Sysfs "format" fields for the KNL PCU PMON (occupancy-capable events). */
static struct attribute *knl_uncore_pcu_formats_attr[] = {
	&format_attr_event2.attr,
	&format_attr_use_occ_ctr.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh6.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	NULL,
};

static const struct attribute_group knl_uncore_pcu_format_group = {
	.name = "format",
	.attrs = knl_uncore_pcu_formats_attr,
};

/* KNL power control unit: single box on the HSW-EP PCU register base. */
static struct intel_uncore_type knl_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_pcu_format_group,
};

/* NULL-terminated list of MSR-based KNL uncore PMU types. */
static struct intel_uncore_type *knl_msr_uncores[] = {
	&knl_uncore_ubox,
	&knl_uncore_cha,
	&knl_uncore_pcu,
	NULL,
};

/* Register the MSR-based KNL uncore types; no per-part clamping needed. */
void knl_uncore_cpu_init(void)
{
	uncore_msr_uncores = knl_msr_uncores;
}
2073
/*
 * Enable a KNL IMC/EDC box by writing 0 to its box control register
 * (clears any freeze/reset bits rather than setting a freeze-enable).
 */
static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, 0);
}
2081
2082 static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
2083                                         struct perf_event *event)
2084 {
2085         struct pci_dev *pdev = box->pci_dev;
2086         struct hw_perf_event *hwc = &event->hw;
2087
2088         if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
2089                                                         == UNCORE_FIXED_EVENT)
2090                 pci_write_config_dword(pdev, hwc->config_base,
2091                                        hwc->config | KNL_PMON_FIXED_CTL_EN);
2092         else
2093                 pci_write_config_dword(pdev, hwc->config_base,
2094                                        hwc->config | SNBEP_PMON_CTL_EN);
2095 }
2096
/* IMC/EDC callbacks; shared by all four KNL memory-side PMU types below. */
static struct intel_uncore_ops knl_uncore_imc_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= knl_uncore_imc_enable_box,
	.read_counter	= snbep_uncore_pci_read_counter,
	.enable_event	= knl_uncore_imc_enable_event,
	.disable_event	= snbep_uncore_pci_disable_event,
};

/* KNL IMC uncore-clock domain: 2 boxes with a fixed UCLK counter. */
static struct intel_uncore_type knl_uncore_imc_uclk = {
	.name			= "imc_uclk",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* KNL IMC DDR-channel (DCLK) domain: 6 channel boxes. */
static struct intel_uncore_type knl_uncore_imc_dclk = {
	.name			= "imc",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_MC0_CH0_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
	.fixed_ctl		= KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
	.box_ctl		= KNL_MC0_CH0_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* KNL EDC (MCDRAM) uncore-clock domain: 8 boxes. */
static struct intel_uncore_type knl_uncore_edc_uclk = {
	.name			= "edc_uclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* KNL EDC (MCDRAM) ECLK domain: 8 boxes with a fixed ECLK counter. */
static struct intel_uncore_type knl_uncore_edc_eclk = {
	.name			= "edc_eclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_EDC0_ECLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
	.fixed_ctl		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
	.box_ctl		= KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2169
/* KNL M2PCIe: event 0x23 may only run on counters 0-1 (mask 0x3). */
static struct event_constraint knl_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};
2174
/* KNL M2PCIe PMON: single PCI-based box using the common SNB-EP PCI layout. */
static struct intel_uncore_type knl_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= knl_uncore_m2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2183
/* Sysfs "format" fields accepted in the raw event encoding for KNL IRP. */
static struct attribute *knl_uncore_irp_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
2193
/* Wraps the IRP format attributes into the sysfs "format" directory. */
static const struct attribute_group knl_uncore_irp_format_group = {
	.name = "format",
	.attrs = knl_uncore_irp_formats_attr,
};
2198
/* KNL IRP PMON: one PCI-based box with two 48-bit counters. */
static struct intel_uncore_type knl_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= KNL_IRP_PCI_PMON_BOX_CTL,
	.ops			= &snbep_uncore_pci_ops,
	.format_group		= &knl_uncore_irp_format_group,
};
2211
/* Indices into knl_pci_uncores[]; encoded into pci_device_id.driver_data. */
enum {
	KNL_PCI_UNCORE_MC_UCLK,
	KNL_PCI_UNCORE_MC_DCLK,
	KNL_PCI_UNCORE_EDC_UCLK,
	KNL_PCI_UNCORE_EDC_ECLK,
	KNL_PCI_UNCORE_M2PCIE,
	KNL_PCI_UNCORE_IRP,
};
2220
/* All KNL PCI-enumerated PMU types, indexed by the KNL_PCI_UNCORE_* enum. */
static struct intel_uncore_type *knl_pci_uncores[] = {
	[KNL_PCI_UNCORE_MC_UCLK]	= &knl_uncore_imc_uclk,
	[KNL_PCI_UNCORE_MC_DCLK]	= &knl_uncore_imc_dclk,
	[KNL_PCI_UNCORE_EDC_UCLK]	= &knl_uncore_edc_uclk,
	[KNL_PCI_UNCORE_EDC_ECLK]	= &knl_uncore_edc_eclk,
	[KNL_PCI_UNCORE_M2PCIE]		= &knl_uncore_m2pcie,
	[KNL_PCI_UNCORE_IRP]		= &knl_uncore_irp,
	NULL,
};
2230
2231 /*
2232  * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
 * device type. Prior to KNL, each instance of a PMU device type had a unique
2234  * device ID.
2235  *
2236  *      PCI Device ID   Uncore PMU Devices
2237  *      ----------------------------------
2238  *      0x7841          MC0 UClk, MC1 UClk
2239  *      0x7843          MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
2240  *                      MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
2241  *      0x7833          EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
2242  *                      EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
2243  *      0x7835          EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
2244  *                      EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
2245  *      0x7817          M2PCIe
2246  *      0x7814          IRP
2247 */
2248
/*
 * KNL PMON PCI ID table.  Because one device ID covers several boxes,
 * each entry pins the expected device/function and encodes the
 * knl_pci_uncores index plus the box number via UNCORE_PCI_DEV_FULL_DATA
 * (or just index/box via UNCORE_PCI_DEV_DATA for unique-ID devices).
 */
static const struct pci_device_id knl_uncore_pci_ids[] = {
	{ /* MC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
	},
	{ /* MC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
	},
	{ /* MC0 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
	},
	{ /* MC0 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
	},
	{ /* MC0 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
	},
	{ /* MC1 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
	},
	{ /* MC1 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
	},
	{ /* MC1 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
	},
	{ /* EDC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
	},
	{ /* EDC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
	},
	{ /* EDC2 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
	},
	{ /* EDC3 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
	},
	{ /* EDC4 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
	},
	{ /* EDC5 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
	},
	{ /* EDC6 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
	},
	{ /* EDC7 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
	},
	{ /* EDC0 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
	},
	{ /* EDC1 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
	},
	{ /* EDC2 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
	},
	{ /* EDC3 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
	},
	{ /* EDC4 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
	},
	{ /* EDC5 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
	},
	{ /* EDC6 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
	},
	{ /* EDC7 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
	},
	{ /* M2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
	},
	{ /* end: all zeroes */ }
};
2356
/* PCI driver shell: only the ID table matters; probing is done by uncore core. */
static struct pci_driver knl_uncore_pci_driver = {
	.name		= "knl_uncore",
	.id_table	= knl_uncore_pci_ids,
};
2361
2362 int knl_uncore_pci_init(void)
2363 {
2364         int ret;
2365
2366         /* All KNL PCI based PMON units are on the same PCI bus except IRP */
2367         ret = snb_pci2phy_map_init(0x7814); /* IRP */
2368         if (ret)
2369                 return ret;
2370         ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
2371         if (ret)
2372                 return ret;
2373         uncore_pci_uncores = knl_pci_uncores;
2374         uncore_pci_driver = &knl_uncore_pci_driver;
2375         return 0;
2376 }
2377
2378 /* end of KNL uncore support */
2379
2380 /* Haswell-EP uncore support */
/* Sysfs "format" fields for the HSW-EP Ubox, including its tid/cid filters. */
static struct attribute *hswep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_filter_tid2.attr,
	&format_attr_filter_cid.attr,
	NULL,
};
2391
/* Wraps the Ubox format attributes into the sysfs "format" directory. */
static const struct attribute_group hswep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_ubox_formats_attr,
};
2396
2397 static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2398 {
2399         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2400         reg1->reg = HSWEP_U_MSR_PMON_FILTER;
2401         reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
2402         reg1->idx = 0;
2403         return 0;
2404 }
2405
/* Ubox ops: common SNB-EP MSR ops plus filter config and shared-reg arbitration. */
static struct intel_uncore_ops hswep_uncore_ubox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_ubox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};
2412
/*
 * HSW-EP Ubox PMON: one box with two 44-bit counters plus a 48-bit fixed
 * (UCLK) counter; one shared register backs the filter MSR.
 */
static struct intel_uncore_type hswep_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 44,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_ubox_ops,
	.format_group		= &hswep_uncore_ubox_format_group,
};
2428
/* Sysfs "format" fields for the HSW-EP C-box, including all filter fields. */
static struct attribute *hswep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid3.attr,
	&format_attr_filter_link2.attr,
	&format_attr_filter_state3.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};
2445
/* Wraps the C-box format attributes into the sysfs "format" directory. */
static const struct attribute_group hswep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_cbox_formats_attr,
};
2450
/* C-box counter constraints: event code -> allowed counter mask. */
static struct event_constraint hswep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
2461
/*
 * C-box extra-register table: maps (event|umask, mask) pairs to the set of
 * filter fields (the third argument, consumed as the "fields" bitmap by
 * hswep_cbox_filter_mask) that the event needs in the filter MSRs.
 */
static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	EVENT_EXTRA_END
};
2503
2504 static u64 hswep_cbox_filter_mask(int fields)
2505 {
2506         u64 mask = 0;
2507         if (fields & 0x1)
2508                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2509         if (fields & 0x2)
2510                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2511         if (fields & 0x4)
2512                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2513         if (fields & 0x8)
2514                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2515         if (fields & 0x10) {
2516                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2517                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2518                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2519                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
2520         }
2521         return mask;
2522 }
2523
/* Shared-reg constraint check using the HSW-EP C-box filter mask. */
static struct event_constraint *
hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
}
2529
2530 static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2531 {
2532         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2533         struct extra_reg *er;
2534         int idx = 0;
2535
2536         for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
2537                 if (er->event != (event->hw.config & er->config_mask))
2538                         continue;
2539                 idx |= er->idx;
2540         }
2541
2542         if (idx) {
2543                 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2544                             HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
2545                 reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
2546                 reg1->idx = idx;
2547         }
2548         return 0;
2549 }
2550
/*
 * Enable a C-box event.  If a filter was set up by hswep_cbox_hw_config,
 * program it first: the 64-bit filter value spans two consecutive MSRs
 * (low half at reg, high half at reg + 1), then the event is enabled.
 */
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				  struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 1, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
2565
/* C-box ops: SNB-EP box management plus HSW-EP filter-aware enable/config. */
static struct intel_uncore_ops hswep_uncore_cbox_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= hswep_cbox_hw_config,
	.get_constraint		= hswep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
2577
/*
 * HSW-EP C-box PMON: up to 18 boxes (trimmed to the core count in
 * hswep_uncore_cpu_init), 4 x 48-bit counters per box, MSRs spaced by
 * HSWEP_CBO_MSR_OFFSET; one shared register holds the filter value.
 */
static struct intel_uncore_type hswep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 18,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= hswep_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
2593
2594 /*
2595  * Write SBOX Initialization register bit by bit to avoid spurious #GPs
2596  */
2597 static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
2598 {
2599         unsigned msr = uncore_msr_box_ctl(box);
2600
2601         if (msr) {
2602                 u64 init = SNBEP_PMON_BOX_CTL_INT;
2603                 u64 flags = 0;
2604                 int i;
2605
2606                 for_each_set_bit(i, (unsigned long *)&init, 64) {
2607                         flags |= (1ULL << i);
2608                         wrmsrl(msr, flags);
2609                 }
2610         }
2611 }
2612
/* SBOX ops: common MSR ops with the bit-by-bit init_box override above. */
static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.init_box		= hswep_uncore_sbox_msr_init_box
};
2617
/* Sysfs "format" fields for the HSW-EP SBOX raw event encoding. */
static struct attribute *hswep_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
2627
/* Wraps the SBOX format attributes into the sysfs "format" directory. */
static const struct attribute_group hswep_uncore_sbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_sbox_formats_attr,
};
2632
/*
 * HSW-EP SBOX PMON: 4 boxes by default (reduced to 2 on some SKUs by
 * hswep_uncore_cpu_init), 4 x 44-bit counters per box.
 */
static struct intel_uncore_type hswep_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 44,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};
2646
2647 static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2648 {
2649         struct hw_perf_event *hwc = &event->hw;
2650         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2651         int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
2652
2653         if (ev_sel >= 0xb && ev_sel <= 0xe) {
2654                 reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
2655                 reg1->idx = ev_sel - 0xb;
2656                 reg1->config = event->attr.config1 & (0xff << reg1->idx);
2657         }
2658         return 0;
2659 }
2660
/* PCU ops: common SNB-EP MSR ops plus band-filter config and arbitration. */
static struct intel_uncore_ops hswep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
2667
/* HSW-EP PCU PMON: one box, 4 x 48-bit counters, one shared filter register. */
static struct intel_uncore_type hswep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};
2681
/* All MSR-based HSW-EP PMU types, registered by hswep_uncore_cpu_init(). */
static struct intel_uncore_type *hswep_msr_uncores[] = {
	&hswep_uncore_ubox,
	&hswep_uncore_cbox,
	&hswep_uncore_sbox,
	&hswep_uncore_pcu,
	NULL,
};
2689
/*
 * Register the HSW-EP MSR uncore types, adjusting box counts to the
 * actual hardware: C-boxes are capped at the package core count, and
 * SKUs identified via the PCU's CAPID4 register get only two SBOXes.
 */
void hswep_uncore_cpu_init(void)
{
	int pkg = boot_cpu_data.logical_proc_id;

	/* Never expose more C-boxes than the package has cores. */
	if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;

	/* Detect 6-8 core systems with only two SBOXes */
	if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
		u32 capid4;

		pci_read_config_dword(uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3],
				      0x94, &capid4);
		/* capid4 bits [7:6] == 0 identifies the two-SBOX parts. */
		if (((capid4 >> 6) & 0x3) == 0)
			hswep_uncore_sbox.num_boxes = 2;
	}

	uncore_msr_uncores = hswep_msr_uncores;
}
2709
/* HSW-EP Home Agent PMON: two PCI-based boxes with the common SNB-EP layout. */
static struct intel_uncore_type hswep_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2717
/*
 * Named IMC events.  The CAS count scale 6.103515625e-5 equals 64/2^20,
 * i.e. each counted transaction is reported as 64 bytes in MiB.
 */
static struct uncore_event_desc hswep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
2728
/* HSW-EP IMC PMON: 8 channels, 4 x 48-bit counters plus a 48-bit fixed counter. */
static struct intel_uncore_type hswep_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2740
/* PCI config-space offsets of the four IRP counters (low dword of each). */
static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
2742
/*
 * Read one 64-bit IRP counter as two 32-bit PCI config reads: the low
 * dword at hswep_uncore_irp_ctrs[idx] and the high dword 4 bytes above.
 * The halves are stored straight into the u64 via pointer casts —
 * NOTE(review): this assumes a little-endian host, which holds on x86.
 */
static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);

	return count;
}
2754
/* IRP ops: SNB-EP PCI box control, IVB-EP event enable/disable, custom read. */
static struct intel_uncore_ops hswep_uncore_irp_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= hswep_uncore_irp_read_counter,
};
2763
/*
 * HSW-EP IRP PMON: one box, 4 x 48-bit counters.  No .perf_ctr/.event_ctl:
 * counters live at fixed config-space offsets read by the custom ops above.
 */
static struct intel_uncore_type hswep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};
2774
/* HSW-EP QPI PMON: three ports, 4 x 48-bit counters, one shared match/mask reg. */
static struct intel_uncore_type hswep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
2788
/* R2PCIe counter constraints: event code -> allowed counter mask. */
static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	EVENT_CONSTRAINT_END
};
2810
/* HSW-EP R2PCIe PMON: one PCI-based box with the common SNB-EP layout. */
static struct intel_uncore_type hswep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= hswep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2819
/* R3QPI counter constraints: event code -> allowed counter mask. */
static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
2856
/* HSW-EP R3QPI PMON: three boxes, 3 x 44-bit counters each. */
static struct intel_uncore_type hswep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 44,
	.constraints	= hswep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2865
/* Indices into hswep_pci_uncores[]; encoded into pci_device_id.driver_data. */
enum {
	HSWEP_PCI_UNCORE_HA,
	HSWEP_PCI_UNCORE_IMC,
	HSWEP_PCI_UNCORE_IRP,
	HSWEP_PCI_UNCORE_QPI,
	HSWEP_PCI_UNCORE_R2PCIE,
	HSWEP_PCI_UNCORE_R3QPI,
};
2874
/* All HSW-EP PCI-enumerated PMU types, indexed by the HSWEP_PCI_UNCORE_* enum. */
static struct intel_uncore_type *hswep_pci_uncores[] = {
	[HSWEP_PCI_UNCORE_HA]	= &hswep_uncore_ha,
	[HSWEP_PCI_UNCORE_IMC]	= &hswep_uncore_imc,
	[HSWEP_PCI_UNCORE_IRP]	= &hswep_uncore_irp,
	[HSWEP_PCI_UNCORE_QPI]	= &hswep_uncore_qpi,
	[HSWEP_PCI_UNCORE_R2PCIE]	= &hswep_uncore_r2pcie,
	[HSWEP_PCI_UNCORE_R3QPI]	= &hswep_uncore_r3qpi,
	NULL,
};
2884
/*
 * HSW-EP uncore PCI device table. driver_data packs the uncore type index
 * (from the HSWEP_PCI_UNCORE_* enum, or UNCORE_EXTRA_PCI_DEV) together with
 * the box/extra-device index via UNCORE_PCI_DEV_DATA().
 */
static const struct pci_device_id hswep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* PCU.3 (for Capability registers) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   HSWEP_PCI_PCU_3),
	},
	{ /* end: all zeroes */ }
};
2975
/* PCI driver stub for HSW-EP uncore boxes; installed via uncore_pci_driver. */
static struct pci_driver hswep_uncore_pci_driver = {
	.name		= "hswep_uncore",
	.id_table	= hswep_uncore_pci_ids,
};
2980
2981 int hswep_uncore_pci_init(void)
2982 {
2983         int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
2984         if (ret)
2985                 return ret;
2986         uncore_pci_uncores = hswep_pci_uncores;
2987         uncore_pci_driver = &hswep_uncore_pci_driver;
2988         return 0;
2989 }
2990 /* end of Haswell-EP uncore support */
2991
2992 /* BDX uncore support */
2993
/* BDX UBox PMON: 2 general 48-bit counters plus a fixed UCLK counter (MSR). */
static struct intel_uncore_type bdx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};
3009
/* BDX C-box scheduling constraints: event code -> allowed counter mask. */
static struct event_constraint bdx_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
3017
/*
 * BDX C-box PMON. num_boxes is the maximum (24); it is clamped to the
 * actual core count in bdx_uncore_cpu_init().
 */
static struct intel_uncore_type bdx_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 24,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= bdx_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
3033
/*
 * BDX S-box PMON. Presence is gated at runtime: bdx_uncore_cpu_init()
 * NULLs its slot on BDX-DE and on parts fused without SBOXes.
 */
static struct intel_uncore_type bdx_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};
3047
/* Index of the SBOX entry in bdx_msr_uncores[], used to drop it at runtime. */
#define BDX_MSR_UNCORE_SBOX	3

/* NULL-terminated list of MSR-based BDX uncore types. */
static struct intel_uncore_type *bdx_msr_uncores[] = {
	&bdx_uncore_ubox,
	&bdx_uncore_cbox,
	&hswep_uncore_pcu,
	&bdx_uncore_sbox,
	NULL,
};
3057
/* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
static struct event_constraint bdx_uncore_pcu_constraints[] = {
	/* Events with the 0x80 bit set may only land on counters 1-3 (0xe). */
	EVENT_CONSTRAINT(0x80, 0xe, 0x80),
	EVENT_CONSTRAINT_END
};
3063
3064 void bdx_uncore_cpu_init(void)
3065 {
3066         int pkg = topology_phys_to_logical_pkg(boot_cpu_data.phys_proc_id);
3067
3068         if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
3069                 bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
3070         uncore_msr_uncores = bdx_msr_uncores;
3071
3072         /* BDX-DE doesn't have SBOX */
3073         if (boot_cpu_data.x86_model == 86) {
3074                 uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
3075         /* Detect systems with no SBOXes */
3076         } else if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
3077                 struct pci_dev *pdev;
3078                 u32 capid4;
3079
3080                 pdev = uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3];
3081                 pci_read_config_dword(pdev, 0x94, &capid4);
3082                 if (((capid4 >> 6) & 0x3) == 0)
3083                         bdx_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
3084         }
3085         hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
3086 }
3087
/* BDX Home Agent PMON: 4 counters x 2 boxes, 48-bit, common PCI PMON layout. */
static struct intel_uncore_type bdx_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3095
/* BDX IMC PMON: per-channel boxes with a fixed (DCLK) counter; reuses HSW-EP event descs. */
static struct intel_uncore_type bdx_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3107
/* BDX IRP PMON. No .perf_ctr/.event_ctl here; register access goes through hswep_uncore_irp_ops. */
static struct intel_uncore_type bdx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};
3118
/* BDX QPI PMON: one box per port (3), with a shared reg for the port filters. */
static struct intel_uncore_type bdx_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
3132
/* BDX R2PCIe scheduling constraints: event code -> allowed counter mask. */
static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};
3145
/* BDX R2PCIe PMON: single box, common PCI PMON layout. */
static struct intel_uncore_type bdx_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3154
/* BDX R3QPI scheduling constraints: event code -> allowed counter mask. */
static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
3188
/* BDX R3QPI PMON: 3 counters x 3 boxes, 48-bit (vs. 44-bit on HSW-EP). */
static struct intel_uncore_type bdx_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3197
/* Index of each BDX PCI-based uncore type in bdx_pci_uncores[]. */
enum {
	BDX_PCI_UNCORE_HA,
	BDX_PCI_UNCORE_IMC,
	BDX_PCI_UNCORE_IRP,
	BDX_PCI_UNCORE_QPI,
	BDX_PCI_UNCORE_R2PCIE,
	BDX_PCI_UNCORE_R3QPI,
};

/* NULL-terminated table consumed via uncore_pci_uncores (see bdx_uncore_pci_init). */
static struct intel_uncore_type *bdx_pci_uncores[] = {
	[BDX_PCI_UNCORE_HA]	= &bdx_uncore_ha,
	[BDX_PCI_UNCORE_IMC]	= &bdx_uncore_imc,
	[BDX_PCI_UNCORE_IRP]	= &bdx_uncore_irp,
	[BDX_PCI_UNCORE_QPI]	= &bdx_uncore_qpi,
	[BDX_PCI_UNCORE_R2PCIE]	= &bdx_uncore_r2pcie,
	[BDX_PCI_UNCORE_R3QPI]	= &bdx_uncore_r3qpi,
	NULL,
};
3216
/*
 * BDX uncore PCI device table. driver_data packs the uncore type index
 * (BDX_PCI_UNCORE_* or UNCORE_EXTRA_PCI_DEV) with the box/extra-device
 * index via UNCORE_PCI_DEV_DATA().
 */
static const struct pci_device_id bdx_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* QPI Port 2 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   BDX_PCI_QPI_PORT2_FILTER),
	},
	{ /* PCU.3 (for Capability registers) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   HSWEP_PCI_PCU_3),
	},
	{ /* end: all zeroes */ }
};
3312
/* PCI driver stub for BDX uncore boxes; installed via uncore_pci_driver. */
static struct pci_driver bdx_uncore_pci_driver = {
	.name		= "bdx_uncore",
	.id_table	= bdx_uncore_pci_ids,
};
3317
3318 int bdx_uncore_pci_init(void)
3319 {
3320         int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3321
3322         if (ret)
3323                 return ret;
3324         uncore_pci_uncores = bdx_pci_uncores;
3325         uncore_pci_driver = &bdx_uncore_pci_driver;
3326         return 0;
3327 }
3328
3329 /* end of BDX uncore support */
3330
3331 /* SKX uncore support */
3332
/* SKX UBox PMON: 2 general 48-bit counters plus a fixed UCLK counter; reuses HSW-EP MSRs. */
static struct intel_uncore_type skx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};
3347
/* sysfs "format" attributes for the SKX CHA PMU (event encoding + filter fields). */
static struct attribute *skx_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_state5.attr,
	&format_attr_filter_rem.attr,
	&format_attr_filter_loc.attr,
	&format_attr_filter_nm.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_not_nm.attr,
	&format_attr_filter_opc_0.attr,
	&format_attr_filter_opc_1.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group skx_uncore_chabox_format_group = {
	.name = "format",
	.attrs = skx_uncore_cha_formats_attr,
};
3373
/* SKX CHA scheduling constraints: events 0x11 and 0x36 only on counter 0. */
static struct event_constraint skx_uncore_chabox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};
3379
/*
 * SKX CHA extra-reg table: (event match, config mask, filter-field idx bits).
 * skx_cha_hw_config() ORs the idx bits of all matching entries to decide
 * which filter fields an event may use.
 */
static struct extra_reg skx_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
	EVENT_EXTRA_END
};
3392
3393 static u64 skx_cha_filter_mask(int fields)
3394 {
3395         u64 mask = 0;
3396
3397         if (fields & 0x1)
3398                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3399         if (fields & 0x2)
3400                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
3401         if (fields & 0x4)
3402                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
3403         if (fields & 0x8) {
3404                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
3405                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
3406                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
3407                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
3408                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
3409                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
3410                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
3411                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
3412                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
3413         }
3414         return mask;
3415 }
3416
/* Delegate to the shared C-box constraint logic with the SKX CHA filter mask. */
static struct event_constraint *
skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
}
3422
3423 static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
3424 {
3425         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
3426         struct extra_reg *er;
3427         int idx = 0;
3428
3429         for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
3430                 if (er->event != (event->hw.config & er->config_mask))
3431                         continue;
3432                 idx |= er->idx;
3433         }
3434
3435         if (idx) {
3436                 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
3437                             HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
3438                 reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
3439                 reg1->idx = idx;
3440         }
3441         return 0;
3442 }
3443
/* SKX CHA box ops: mostly reused IVB-EP/SNB-EP/HSW-EP helpers plus CHA-specific config. */
static struct intel_uncore_ops skx_uncore_chabox_ops = {
	/* There is no frz_en for chabox ctl */
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= skx_cha_hw_config,
	.get_constraint		= skx_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
3456
/*
 * SKX CHA PMON. num_boxes is deliberately absent here; presumably it is
 * filled in at runtime by the SKX cpu-init path — confirm against caller.
 * NOTE(review): event_mask reuses the HSW-EP SBOX raw event mask — verify
 * the tid_en bit layout matches the SKX CHA control register.
 */
static struct intel_uncore_type skx_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &skx_uncore_chabox_ops,
	.format_group		= &skx_uncore_chabox_format_group,
};
3471
/* sysfs "format" attributes for the SKX IIO PMU (incl. channel/FC mask fields). */
static struct attribute *skx_uncore_iio_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh9.attr,
	&format_attr_ch_mask.attr,
	&format_attr_fc_mask.attr,
	NULL,
};

static const struct attribute_group skx_uncore_iio_format_group = {
	.name = "format",
	.attrs = skx_uncore_iio_formats_attr,
};
3487
/* SKX IIO scheduling constraints: event code -> allowed counter mask. */
static struct event_constraint skx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
	EVENT_CONSTRAINT_END
};
3497
3498 static void skx_iio_enable_event(struct intel_uncore_box *box,
3499                                  struct perf_event *event)
3500 {
3501         struct hw_perf_event *hwc = &event->hw;
3502
3503         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
3504 }
3505
/* SKX IIO box ops: shared MSR helpers plus the IIO-specific event enable. */
static struct intel_uncore_ops skx_uncore_iio_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= skx_iio_enable_event,
	.read_counter		= uncore_msr_read_counter,
};
3514
/* SKX IIO PMON: 4 counters x 6 stacks, with an extended event mask word. */
static struct intel_uncore_type skx_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SKX_IIO0_MSR_PMON_CTL0,
	.perf_ctr		= SKX_IIO0_MSR_PMON_CTR0,
	.event_mask		= SKX_IIO_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= SKX_IIO0_MSR_PMON_BOX_CTL,
	.msr_offset		= SKX_IIO_MSR_OFFSET,
	.constraints		= skx_uncore_iio_constraints,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &skx_uncore_iio_format_group,
};
3530
/* Free-running IIO counter groups exposed on SKX. */
enum perf_uncore_iio_freerunning_type_id {
	SKX_IIO_MSR_IOCLK			= 0,
	SKX_IIO_MSR_BW				= 1,
	SKX_IIO_MSR_UTIL			= 2,

	SKX_IIO_FREERUNNING_TYPE_MAX,
};


/* Per-type counter geometry; field meaning per struct freerunning_counters. */
static struct freerunning_counters skx_iio_freerunning[] = {
	[SKX_IIO_MSR_IOCLK]	= { 0xa45, 0x1, 0x20, 1, 36 },
	[SKX_IIO_MSR_BW]	= { 0xb00, 0x1, 0x10, 8, 36 },
	[SKX_IIO_MSR_UTIL]	= { 0xb08, 0x1, 0x10, 8, 36 },
};
3545
/*
 * Named events for the SKX IIO free-running counters. All use the pseudo
 * event 0xff; the umask selects the counter group and port.
 */
static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
	/* Free-Running IO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,	"MiB"),
	/* Free-running IIO UTILIZATION Counters */
	INTEL_UNCORE_EVENT_DESC(util_in_port0,		"event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(util_out_port0,		"event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(util_in_port1,		"event=0xff,umask=0x32"),
	INTEL_UNCORE_EVENT_DESC(util_out_port1,		"event=0xff,umask=0x33"),
	INTEL_UNCORE_EVENT_DESC(util_in_port2,		"event=0xff,umask=0x34"),
	INTEL_UNCORE_EVENT_DESC(util_out_port2,		"event=0xff,umask=0x35"),
	INTEL_UNCORE_EVENT_DESC(util_in_port3,		"event=0xff,umask=0x36"),
	INTEL_UNCORE_EVENT_DESC(util_out_port3,		"event=0xff,umask=0x37"),
	{ /* end: all zeroes */ },
};
3585
3586 static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
3587         .read_counter           = uncore_msr_read_counter,
3588 };
3589
/* sysfs "format" attributes exposed for the IIO free-running PMU. */
static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL,
};
3595
/* sysfs "format" group for the IIO free-running PMU. */
static const struct attribute_group skx_uncore_iio_freerunning_format_group = {
	.name = "format",
	.attrs = skx_uncore_iio_freerunning_formats_attr,
};
3600
/*
 * SKX IIO free-running counter PMU: 17 counters per box across 6 boxes.
 * Counter layout per type comes from skx_iio_freerunning; events are the
 * fixed bandwidth/utilization descriptors above.
 */
static struct intel_uncore_type skx_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 17,
	.num_boxes		= 6,
	.num_freerunning_types	= SKX_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= skx_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= skx_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
3611
/*
 * Generic SKX event format: event select, umask, edge detect, invert and
 * an 8-bit threshold.  Shared by several SKX box types below.
 */
static struct attribute *skx_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
3620
/* Shared sysfs "format" group for the generic SKX event layout. */
static const struct attribute_group skx_uncore_format_group = {
	.name = "format",
	.attrs = skx_uncore_formats_attr,
};
3625
/*
 * SKX IRP (IIO ring port) PMU: 2 counters per box, 6 boxes, MSR based.
 * Reuses the IIO MSR ops and the generic SKX format group.
 */
static struct intel_uncore_type skx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SKX_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= SKX_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SKX_IRP0_MSR_PMON_BOX_CTL,
	.msr_offset		= SKX_IRP_MSR_OFFSET,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &skx_uncore_format_group,
};
3639
/*
 * PCU event format: the generic fields plus occupancy invert/edge and the
 * four frequency-band filters.
 */
static struct attribute *skx_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};
3654
3655 static struct attribute_group skx_uncore_pcu_format_group = {
3656         .name = "format",
3657         .attrs = skx_uncore_pcu_formats_attr,
3658 };
3659
/*
 * PCU ops: common IvyBridge-EP MSR ops plus Haswell-EP style hw_config
 * and the SNB-EP PCU shared-register constraint handlers.
 */
static struct intel_uncore_ops skx_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
3666
/*
 * SKX PCU (power control unit) PMU: 4 counters in a single box, reusing
 * the Haswell-EP MSR layout.  One shared register backs the band filters.
 */
static struct intel_uncore_type skx_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &skx_uncore_pcu_ops,
	.format_group		= &skx_uncore_pcu_format_group,
};
3680
/* All MSR-based SKX uncore PMU types, NULL-terminated. */
static struct intel_uncore_type *skx_msr_uncores[] = {
	&skx_uncore_ubox,
	&skx_uncore_chabox,
	&skx_uncore_iio,
	&skx_uncore_iio_free_running,
	&skx_uncore_irp,
	&skx_uncore_pcu,
	NULL,
};
3690
3691 /*
3692  * To determine the number of CHAs, it should read bits 27:0 in the CAPID6
3693  * register which located at Device 30, Function 3, Offset 0x9C. PCI ID 0x2083.
3694  */
3695 #define SKX_CAPID6              0x9c
3696 #define SKX_CHA_BIT_MASK        GENMASK(27, 0)
3697
3698 static int skx_count_chabox(void)
3699 {
3700         struct pci_dev *dev = NULL;
3701         u32 val = 0;
3702
3703         dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
3704         if (!dev)
3705                 goto out;
3706
3707         pci_read_config_dword(dev, SKX_CAPID6, &val);
3708         val &= SKX_CHA_BIT_MASK;
3709 out:
3710         pci_dev_put(dev);
3711         return hweight32(val);
3712 }
3713
/*
 * Register the SKX MSR-based uncore types; the CHA box count is probed
 * at runtime from CAPID6 since it varies per part.
 */
void skx_uncore_cpu_init(void)
{
	skx_uncore_chabox.num_boxes = skx_count_chabox();
	uncore_msr_uncores = skx_msr_uncores;
}
3719
/*
 * SKX IMC (memory controller) PMU: 4 general counters plus a fixed
 * counter per box, 6 boxes (channels), PCI config-space based.  Reuses
 * the Haswell-EP IMC event descriptors and IvyBridge-EP PCI ops.
 */
static struct intel_uncore_type skx_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3736
/* UPI event format: like the generic SKX layout but with an extended umask. */
static struct attribute *skx_upi_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
3745
/* sysfs "format" group for the UPI PMU. */
static const struct attribute_group skx_upi_uncore_format_group = {
	.name = "format",
	.attrs = skx_upi_uncore_formats_attr,
};
3750
/*
 * Initialize a UPI PMON box: mark its control-register stride (the flag
 * name indicates 8-byte spacing between event control registers) and
 * write the box-level reset/init value.
 */
static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}
3758
/* UPI PCI ops: SNB-EP common PCI handlers with a UPI-specific init_box. */
static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
	.init_box	= skx_upi_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
3767
/*
 * SKX UPI (inter-socket link) PMU: 4 counters per box, 3 boxes (links),
 * PCI based, with an extended umask field on top of the common mask.
 */
static struct intel_uncore_type skx_uncore_upi = {
	.name		= "upi",
	.num_counters	= 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_UPI_PCI_PMON_CTR0,
	.event_ctl	= SKX_UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext = SKX_UPI_CTL_UMASK_EXT,
	.box_ctl	= SKX_UPI_PCI_PMON_BOX_CTL,
	.ops		= &skx_upi_uncore_pci_ops,
	.format_group	= &skx_upi_uncore_format_group,
};
3781
/*
 * Initialize an M2M PMON box: same pattern as the UPI init — set the
 * 8-byte control-register stride flag and reset the box control register.
 */
static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}
3789
/* M2M PCI ops: SNB-EP common PCI handlers with an M2M-specific init_box. */
static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
	.init_box	= skx_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
3798
/* SKX M2M (mesh-to-memory) PMU: 4 counters per box, 2 boxes, PCI based. */
static struct intel_uncore_type skx_uncore_m2m = {
	.name		= "m2m",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_M2M_PCI_PMON_CTR0,
	.event_ctl	= SKX_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SKX_M2M_PCI_PMON_BOX_CTL,
	.ops		= &skx_m2m_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3811
/* M2PCIe scheduling constraint: event 0x23 may only use counters 0-1. */
static struct event_constraint skx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};
3816
/* SKX M2PCIe (mesh-to-PCIe) PMU: 4 counters per box, 4 boxes, PCI based. */
static struct intel_uncore_type skx_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m2pcie_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3830
/*
 * M3UPI scheduling constraints: events 0x1d/0x1e are restricted to
 * counter 0; the remaining listed events may use counters 0-2.
 */
static struct event_constraint skx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
	EVENT_CONSTRAINT_END
};
3842
/* SKX M3UPI (mesh-to-UPI) PMU: 3 counters per box, 3 boxes, PCI based. */
static struct intel_uncore_type skx_uncore_m3upi = {
	.name		= "m3upi",
	.num_counters	= 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m3upi_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3856
/* Indices into skx_pci_uncores[], matched by the PCI ID table below. */
enum {
	SKX_PCI_UNCORE_IMC,
	SKX_PCI_UNCORE_M2M,
	SKX_PCI_UNCORE_UPI,
	SKX_PCI_UNCORE_M2PCIE,
	SKX_PCI_UNCORE_M3UPI,
};
3864
/* All PCI-based SKX uncore PMU types, indexed by the enum above. */
static struct intel_uncore_type *skx_pci_uncores[] = {
	[SKX_PCI_UNCORE_IMC]	= &skx_uncore_imc,
	[SKX_PCI_UNCORE_M2M]	= &skx_uncore_m2m,
	[SKX_PCI_UNCORE_UPI]	= &skx_uncore_upi,
	[SKX_PCI_UNCORE_M2PCIE]	= &skx_uncore_m2pcie,
	[SKX_PCI_UNCORE_M3UPI]	= &skx_uncore_m3upi,
	NULL,
};
3873
/*
 * PCI IDs of the SKX uncore PMON devices.  Each entry encodes the fixed
 * device/function location, the uncore type index and the box number in
 * driver_data via UNCORE_PCI_DEV_FULL_DATA().
 */
static const struct pci_device_id skx_uncore_pci_ids[] = {
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
	},
	{ /* M2M0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
	},
	{ /* M2M1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
	},
	{ /* UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
	},
	{ /* UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
	},
	{ /* UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
	},
	{ /* M2PCIe 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* M2PCIe 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
	},
	{ /* M2PCIe 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
	},
	{ /* M2PCIe 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
	},
	{ /* M3UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};
3949
3950
/* Skeleton PCI driver used by the uncore core to claim the PMON devices. */
static struct pci_driver skx_uncore_pci_driver = {
	.name		= "skx_uncore",
	.id_table	= skx_uncore_pci_ids,
};
3955
3956 int skx_uncore_pci_init(void)
3957 {
3958         /* need to double check pci address */
3959         int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);
3960
3961         if (ret)
3962                 return ret;
3963
3964         uncore_pci_uncores = skx_pci_uncores;
3965         uncore_pci_driver = &skx_uncore_pci_driver;
3966         return 0;
3967 }
3968
3969 /* end of SKX uncore support */