x86: Remove unusual Unicode characters from comments
[linux-2.6-microblaze.git] / arch / x86 / events / intel / uncore_snbep.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* SandyBridge-EP/IvyTown uncore support */
3 #include "uncore.h"
4
5 /* SNB-EP pci bus to socket mapping */
6 #define SNBEP_CPUNODEID                 0x40
7 #define SNBEP_GIDNIDMAP                 0x54
8
9 /* SNB-EP Box level control */
10 #define SNBEP_PMON_BOX_CTL_RST_CTRL     (1 << 0)
11 #define SNBEP_PMON_BOX_CTL_RST_CTRS     (1 << 1)
12 #define SNBEP_PMON_BOX_CTL_FRZ          (1 << 8)
13 #define SNBEP_PMON_BOX_CTL_FRZ_EN       (1 << 16)
14 #define SNBEP_PMON_BOX_CTL_INT          (SNBEP_PMON_BOX_CTL_RST_CTRL | \
15                                          SNBEP_PMON_BOX_CTL_RST_CTRS | \
16                                          SNBEP_PMON_BOX_CTL_FRZ_EN)
17 /* SNB-EP event control */
18 #define SNBEP_PMON_CTL_EV_SEL_MASK      0x000000ff
19 #define SNBEP_PMON_CTL_UMASK_MASK       0x0000ff00
20 #define SNBEP_PMON_CTL_RST              (1 << 17)
21 #define SNBEP_PMON_CTL_EDGE_DET         (1 << 18)
22 #define SNBEP_PMON_CTL_EV_SEL_EXT       (1 << 21)
23 #define SNBEP_PMON_CTL_EN               (1 << 22)
24 #define SNBEP_PMON_CTL_INVERT           (1 << 23)
25 #define SNBEP_PMON_CTL_TRESH_MASK       0xff000000
26 #define SNBEP_PMON_RAW_EVENT_MASK       (SNBEP_PMON_CTL_EV_SEL_MASK | \
27                                          SNBEP_PMON_CTL_UMASK_MASK | \
28                                          SNBEP_PMON_CTL_EDGE_DET | \
29                                          SNBEP_PMON_CTL_INVERT | \
30                                          SNBEP_PMON_CTL_TRESH_MASK)
31
32 /* SNB-EP Ubox event control */
33 #define SNBEP_U_MSR_PMON_CTL_TRESH_MASK         0x1f000000
34 #define SNBEP_U_MSR_PMON_RAW_EVENT_MASK         \
35                                 (SNBEP_PMON_CTL_EV_SEL_MASK | \
36                                  SNBEP_PMON_CTL_UMASK_MASK | \
37                                  SNBEP_PMON_CTL_EDGE_DET | \
38                                  SNBEP_PMON_CTL_INVERT | \
39                                  SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
40
41 #define SNBEP_CBO_PMON_CTL_TID_EN               (1 << 19)
42 #define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK       (SNBEP_PMON_RAW_EVENT_MASK | \
43                                                  SNBEP_CBO_PMON_CTL_TID_EN)
44
45 /* SNB-EP PCU event control */
46 #define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK     0x0000c000
47 #define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK       0x1f000000
48 #define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT       (1 << 30)
49 #define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET     (1 << 31)
50 #define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK       \
51                                 (SNBEP_PMON_CTL_EV_SEL_MASK | \
52                                  SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
53                                  SNBEP_PMON_CTL_EDGE_DET | \
54                                  SNBEP_PMON_CTL_INVERT | \
55                                  SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
56                                  SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
57                                  SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
58
59 #define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK       \
60                                 (SNBEP_PMON_RAW_EVENT_MASK | \
61                                  SNBEP_PMON_CTL_EV_SEL_EXT)
62
63 /* SNB-EP pci control register */
64 #define SNBEP_PCI_PMON_BOX_CTL                  0xf4
65 #define SNBEP_PCI_PMON_CTL0                     0xd8
66 /* SNB-EP pci counter register */
67 #define SNBEP_PCI_PMON_CTR0                     0xa0
68
69 /* SNB-EP home agent register */
70 #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0        0x40
71 #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1        0x44
72 #define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH       0x48
73 /* SNB-EP memory controller register */
74 #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL         0xf0
75 #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR         0xd0
76 /* SNB-EP QPI register */
77 #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0          0x228
78 #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1          0x22c
79 #define SNBEP_Q_Py_PCI_PMON_PKT_MASK0           0x238
80 #define SNBEP_Q_Py_PCI_PMON_PKT_MASK1           0x23c
81
82 /* SNB-EP Ubox register */
83 #define SNBEP_U_MSR_PMON_CTR0                   0xc16
84 #define SNBEP_U_MSR_PMON_CTL0                   0xc10
85
86 #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL         0xc08
87 #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR         0xc09
88
89 /* SNB-EP Cbo register */
90 #define SNBEP_C0_MSR_PMON_CTR0                  0xd16
91 #define SNBEP_C0_MSR_PMON_CTL0                  0xd10
92 #define SNBEP_C0_MSR_PMON_BOX_CTL               0xd04
93 #define SNBEP_C0_MSR_PMON_BOX_FILTER            0xd14
94 #define SNBEP_CBO_MSR_OFFSET                    0x20
95
96 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID       0x1f
97 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID       0x3fc00
98 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE     0x7c0000
99 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC       0xff800000
100
101 #define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {    \
102         .event = (e),                           \
103         .msr = SNBEP_C0_MSR_PMON_BOX_FILTER,    \
104         .config_mask = (m),                     \
105         .idx = (i)                              \
106 }
107
108 /* SNB-EP PCU register */
109 #define SNBEP_PCU_MSR_PMON_CTR0                 0xc36
110 #define SNBEP_PCU_MSR_PMON_CTL0                 0xc30
111 #define SNBEP_PCU_MSR_PMON_BOX_CTL              0xc24
112 #define SNBEP_PCU_MSR_PMON_BOX_FILTER           0xc34
113 #define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK      0xffffffff
114 #define SNBEP_PCU_MSR_CORE_C3_CTR               0x3fc
115 #define SNBEP_PCU_MSR_CORE_C6_CTR               0x3fd
116
117 /* IVBEP event control */
118 #define IVBEP_PMON_BOX_CTL_INT          (SNBEP_PMON_BOX_CTL_RST_CTRL | \
119                                          SNBEP_PMON_BOX_CTL_RST_CTRS)
120 #define IVBEP_PMON_RAW_EVENT_MASK               (SNBEP_PMON_CTL_EV_SEL_MASK | \
121                                          SNBEP_PMON_CTL_UMASK_MASK | \
122                                          SNBEP_PMON_CTL_EDGE_DET | \
123                                          SNBEP_PMON_CTL_TRESH_MASK)
124 /* IVBEP Ubox */
125 #define IVBEP_U_MSR_PMON_GLOBAL_CTL             0xc00
126 #define IVBEP_U_PMON_GLOBAL_FRZ_ALL             (1 << 31)
127 #define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL           (1 << 29)
128
129 #define IVBEP_U_MSR_PMON_RAW_EVENT_MASK \
130                                 (SNBEP_PMON_CTL_EV_SEL_MASK | \
131                                  SNBEP_PMON_CTL_UMASK_MASK | \
132                                  SNBEP_PMON_CTL_EDGE_DET | \
133                                  SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
134 /* IVBEP Cbo */
135 #define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK               (IVBEP_PMON_RAW_EVENT_MASK | \
136                                                  SNBEP_CBO_PMON_CTL_TID_EN)
137
138 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID               (0x1fULL << 0)
139 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK      (0xfULL << 5)
140 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE     (0x3fULL << 17)
141 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID               (0xffffULL << 32)
142 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC               (0x1ffULL << 52)
143 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6                (0x1ULL << 61)
144 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC                (0x1ULL << 62)
145 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC      (0x1ULL << 63)
146
147 /* IVBEP home agent */
148 #define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST         (1 << 16)
149 #define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK                \
150                                 (IVBEP_PMON_RAW_EVENT_MASK | \
151                                  IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
152 /* IVBEP PCU */
153 #define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK       \
154                                 (SNBEP_PMON_CTL_EV_SEL_MASK | \
155                                  SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
156                                  SNBEP_PMON_CTL_EDGE_DET | \
157                                  SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
158                                  SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
159                                  SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
160 /* IVBEP QPI */
161 #define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK       \
162                                 (IVBEP_PMON_RAW_EVENT_MASK | \
163                                  SNBEP_PMON_CTL_EV_SEL_EXT)
164
165 #define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
166                                 ((1ULL << (n)) - 1)))
167
168 /* Haswell-EP Ubox */
169 #define HSWEP_U_MSR_PMON_CTR0                   0x709
170 #define HSWEP_U_MSR_PMON_CTL0                   0x705
171 #define HSWEP_U_MSR_PMON_FILTER                 0x707
172
173 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL         0x703
174 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR         0x704
175
176 #define HSWEP_U_MSR_PMON_BOX_FILTER_TID         (0x1 << 0)
177 #define HSWEP_U_MSR_PMON_BOX_FILTER_CID         (0x1fULL << 1)
178 #define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
179                                         (HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
180                                          HSWEP_U_MSR_PMON_BOX_FILTER_CID)
181
182 /* Haswell-EP CBo */
183 #define HSWEP_C0_MSR_PMON_CTR0                  0xe08
184 #define HSWEP_C0_MSR_PMON_CTL0                  0xe01
185 #define HSWEP_C0_MSR_PMON_BOX_CTL                       0xe00
186 #define HSWEP_C0_MSR_PMON_BOX_FILTER0           0xe05
187 #define HSWEP_CBO_MSR_OFFSET                    0x10
188
189
190 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID               (0x3fULL << 0)
191 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK      (0xfULL << 6)
192 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE     (0x7fULL << 17)
193 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID               (0xffffULL << 32)
194 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC               (0x1ffULL << 52)
195 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6                (0x1ULL << 61)
196 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC                (0x1ULL << 62)
197 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC      (0x1ULL << 63)
198
199
200 /* Haswell-EP Sbox */
201 #define HSWEP_S0_MSR_PMON_CTR0                  0x726
202 #define HSWEP_S0_MSR_PMON_CTL0                  0x721
203 #define HSWEP_S0_MSR_PMON_BOX_CTL                       0x720
204 #define HSWEP_SBOX_MSR_OFFSET                   0xa
205 #define HSWEP_S_MSR_PMON_RAW_EVENT_MASK         (SNBEP_PMON_RAW_EVENT_MASK | \
206                                                  SNBEP_CBO_PMON_CTL_TID_EN)
207
208 /* Haswell-EP PCU */
209 #define HSWEP_PCU_MSR_PMON_CTR0                 0x717
210 #define HSWEP_PCU_MSR_PMON_CTL0                 0x711
211 #define HSWEP_PCU_MSR_PMON_BOX_CTL              0x710
212 #define HSWEP_PCU_MSR_PMON_BOX_FILTER           0x715
213
214 /* KNL Ubox */
215 #define KNL_U_MSR_PMON_RAW_EVENT_MASK \
216                                         (SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
217                                                 SNBEP_CBO_PMON_CTL_TID_EN)
218 /* KNL CHA */
219 #define KNL_CHA_MSR_OFFSET                      0xc
220 #define KNL_CHA_MSR_PMON_CTL_QOR                (1 << 16)
221 #define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
222                                         (SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
223                                          KNL_CHA_MSR_PMON_CTL_QOR)
224 #define KNL_CHA_MSR_PMON_BOX_FILTER_TID         0x1ff
225 #define KNL_CHA_MSR_PMON_BOX_FILTER_STATE       (7 << 18)
226 #define KNL_CHA_MSR_PMON_BOX_FILTER_OP          (0xfffffe2aULL << 32)
227 #define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE (0x1ULL << 32)
228 #define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE  (0x1ULL << 33)
229 #define KNL_CHA_MSR_PMON_BOX_FILTER_NNC         (0x1ULL << 37)
230
231 /* KNL EDC/MC UCLK */
232 #define KNL_UCLK_MSR_PMON_CTR0_LOW              0x400
233 #define KNL_UCLK_MSR_PMON_CTL0                  0x420
234 #define KNL_UCLK_MSR_PMON_BOX_CTL               0x430
235 #define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW        0x44c
236 #define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL        0x454
237 #define KNL_PMON_FIXED_CTL_EN                   0x1
238
239 /* KNL EDC */
240 #define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW         0xa00
241 #define KNL_EDC0_ECLK_MSR_PMON_CTL0             0xa20
242 #define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL          0xa30
243 #define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW   0xa3c
244 #define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL   0xa44
245
246 /* KNL MC */
247 #define KNL_MC0_CH0_MSR_PMON_CTR0_LOW           0xb00
248 #define KNL_MC0_CH0_MSR_PMON_CTL0               0xb20
249 #define KNL_MC0_CH0_MSR_PMON_BOX_CTL            0xb30
250 #define KNL_MC0_CH0_MSR_PMON_FIXED_LOW          0xb3c
251 #define KNL_MC0_CH0_MSR_PMON_FIXED_CTL          0xb44
252
253 /* KNL IRP */
254 #define KNL_IRP_PCI_PMON_BOX_CTL                0xf0
255 #define KNL_IRP_PCI_PMON_RAW_EVENT_MASK         (SNBEP_PMON_RAW_EVENT_MASK | \
256                                                  KNL_CHA_MSR_PMON_CTL_QOR)
257 /* KNL PCU */
258 #define KNL_PCU_PMON_CTL_EV_SEL_MASK            0x0000007f
259 #define KNL_PCU_PMON_CTL_USE_OCC_CTR            (1 << 7)
260 #define KNL_PCU_MSR_PMON_CTL_TRESH_MASK         0x3f000000
261 #define KNL_PCU_MSR_PMON_RAW_EVENT_MASK \
262                                 (KNL_PCU_PMON_CTL_EV_SEL_MASK | \
263                                  KNL_PCU_PMON_CTL_USE_OCC_CTR | \
264                                  SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
265                                  SNBEP_PMON_CTL_EDGE_DET | \
266                                  SNBEP_CBO_PMON_CTL_TID_EN | \
267                                  SNBEP_PMON_CTL_INVERT | \
268                                  KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
269                                  SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
270                                  SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
271
272 /* SKX pci bus to socket mapping */
273 #define SKX_CPUNODEID                   0xc0
274 #define SKX_GIDNIDMAP                   0xd4
275
276 /*
277  * The CPU_BUS_NUMBER MSR returns the values of the respective CPUBUSNO CSR
278  * that BIOS programmed. MSR has package scope.
279  * |  Bit  |  Default  |  Description
280  * | [63]  |    00h    | VALID - When set, indicates the CPU bus
281  *                       numbers have been initialized. (RO)
282  * |[62:48]|    ---    | Reserved
283  * |[47:40]|    00h    | BUS_NUM_5 - Return the bus number BIOS assigned
284  *                       CPUBUSNO(5). (RO)
285  * |[39:32]|    00h    | BUS_NUM_4 - Return the bus number BIOS assigned
286  *                       CPUBUSNO(4). (RO)
287  * |[31:24]|    00h    | BUS_NUM_3 - Return the bus number BIOS assigned
288  *                       CPUBUSNO(3). (RO)
289  * |[23:16]|    00h    | BUS_NUM_2 - Return the bus number BIOS assigned
290  *                       CPUBUSNO(2). (RO)
291  * |[15:8] |    00h    | BUS_NUM_1 - Return the bus number BIOS assigned
292  *                       CPUBUSNO(1). (RO)
293  * | [7:0] |    00h    | BUS_NUM_0 - Return the bus number BIOS assigned
294  *                       CPUBUSNO(0). (RO)
295  */
296 #define SKX_MSR_CPU_BUS_NUMBER          0x300
297 #define SKX_MSR_CPU_BUS_VALID_BIT       (1ULL << 63)
298 #define BUS_NUM_STRIDE                  8
299
300 /* SKX CHA */
301 #define SKX_CHA_MSR_PMON_BOX_FILTER_TID         (0x1ffULL << 0)
302 #define SKX_CHA_MSR_PMON_BOX_FILTER_LINK        (0xfULL << 9)
303 #define SKX_CHA_MSR_PMON_BOX_FILTER_STATE       (0x3ffULL << 17)
304 #define SKX_CHA_MSR_PMON_BOX_FILTER_REM         (0x1ULL << 32)
305 #define SKX_CHA_MSR_PMON_BOX_FILTER_LOC         (0x1ULL << 33)
306 #define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC     (0x1ULL << 35)
307 #define SKX_CHA_MSR_PMON_BOX_FILTER_NM          (0x1ULL << 36)
308 #define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM      (0x1ULL << 37)
309 #define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0        (0x3ffULL << 41)
310 #define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1        (0x3ffULL << 51)
311 #define SKX_CHA_MSR_PMON_BOX_FILTER_C6          (0x1ULL << 61)
312 #define SKX_CHA_MSR_PMON_BOX_FILTER_NC          (0x1ULL << 62)
313 #define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC        (0x1ULL << 63)
314
315 /* SKX IIO */
316 #define SKX_IIO0_MSR_PMON_CTL0          0xa48
317 #define SKX_IIO0_MSR_PMON_CTR0          0xa41
318 #define SKX_IIO0_MSR_PMON_BOX_CTL       0xa40
319 #define SKX_IIO_MSR_OFFSET              0x20
320
321 #define SKX_PMON_CTL_TRESH_MASK         (0xff << 24)
322 #define SKX_PMON_CTL_TRESH_MASK_EXT     (0xf)
323 #define SKX_PMON_CTL_CH_MASK            (0xff << 4)
324 #define SKX_PMON_CTL_FC_MASK            (0x7 << 12)
325 #define SKX_IIO_PMON_RAW_EVENT_MASK     (SNBEP_PMON_CTL_EV_SEL_MASK | \
326                                          SNBEP_PMON_CTL_UMASK_MASK | \
327                                          SNBEP_PMON_CTL_EDGE_DET | \
328                                          SNBEP_PMON_CTL_INVERT | \
329                                          SKX_PMON_CTL_TRESH_MASK)
330 #define SKX_IIO_PMON_RAW_EVENT_MASK_EXT (SKX_PMON_CTL_TRESH_MASK_EXT | \
331                                          SKX_PMON_CTL_CH_MASK | \
332                                          SKX_PMON_CTL_FC_MASK)
333
334 /* SKX IRP */
335 #define SKX_IRP0_MSR_PMON_CTL0          0xa5b
336 #define SKX_IRP0_MSR_PMON_CTR0          0xa59
337 #define SKX_IRP0_MSR_PMON_BOX_CTL       0xa58
338 #define SKX_IRP_MSR_OFFSET              0x20
339
340 /* SKX UPI */
341 #define SKX_UPI_PCI_PMON_CTL0           0x350
342 #define SKX_UPI_PCI_PMON_CTR0           0x318
343 #define SKX_UPI_PCI_PMON_BOX_CTL        0x378
344 #define SKX_UPI_CTL_UMASK_EXT           0xffefff
345
346 /* SKX M2M */
347 #define SKX_M2M_PCI_PMON_CTL0           0x228
348 #define SKX_M2M_PCI_PMON_CTR0           0x200
349 #define SKX_M2M_PCI_PMON_BOX_CTL        0x258
350
351 /* SNR Ubox */
352 #define SNR_U_MSR_PMON_CTR0                     0x1f98
353 #define SNR_U_MSR_PMON_CTL0                     0x1f91
354 #define SNR_U_MSR_PMON_UCLK_FIXED_CTL           0x1f93
355 #define SNR_U_MSR_PMON_UCLK_FIXED_CTR           0x1f94
356
357 /* SNR CHA */
358 #define SNR_CHA_RAW_EVENT_MASK_EXT              0x3ffffff
359 #define SNR_CHA_MSR_PMON_CTL0                   0x1c01
360 #define SNR_CHA_MSR_PMON_CTR0                   0x1c08
361 #define SNR_CHA_MSR_PMON_BOX_CTL                0x1c00
362 #define SNR_C0_MSR_PMON_BOX_FILTER0             0x1c05
363
364
365 /* SNR IIO */
366 #define SNR_IIO_MSR_PMON_CTL0                   0x1e08
367 #define SNR_IIO_MSR_PMON_CTR0                   0x1e01
368 #define SNR_IIO_MSR_PMON_BOX_CTL                0x1e00
369 #define SNR_IIO_MSR_OFFSET                      0x10
370 #define SNR_IIO_PMON_RAW_EVENT_MASK_EXT         0x7ffff
371
372 /* SNR IRP */
373 #define SNR_IRP0_MSR_PMON_CTL0                  0x1ea8
374 #define SNR_IRP0_MSR_PMON_CTR0                  0x1ea1
375 #define SNR_IRP0_MSR_PMON_BOX_CTL               0x1ea0
376 #define SNR_IRP_MSR_OFFSET                      0x10
377
378 /* SNR M2PCIE */
379 #define SNR_M2PCIE_MSR_PMON_CTL0                0x1e58
380 #define SNR_M2PCIE_MSR_PMON_CTR0                0x1e51
381 #define SNR_M2PCIE_MSR_PMON_BOX_CTL             0x1e50
382 #define SNR_M2PCIE_MSR_OFFSET                   0x10
383
384 /* SNR PCU */
385 #define SNR_PCU_MSR_PMON_CTL0                   0x1ef1
386 #define SNR_PCU_MSR_PMON_CTR0                   0x1ef8
387 #define SNR_PCU_MSR_PMON_BOX_CTL                0x1ef0
388 #define SNR_PCU_MSR_PMON_BOX_FILTER             0x1efc
389
390 /* SNR M2M */
391 #define SNR_M2M_PCI_PMON_CTL0                   0x468
392 #define SNR_M2M_PCI_PMON_CTR0                   0x440
393 #define SNR_M2M_PCI_PMON_BOX_CTL                0x438
394 #define SNR_M2M_PCI_PMON_UMASK_EXT              0xff
395
396 /* SNR PCIE3 */
397 #define SNR_PCIE3_PCI_PMON_CTL0                 0x508
398 #define SNR_PCIE3_PCI_PMON_CTR0                 0x4e8
399 #define SNR_PCIE3_PCI_PMON_BOX_CTL              0x4e0
400
401 /* SNR IMC */
402 #define SNR_IMC_MMIO_PMON_FIXED_CTL             0x54
403 #define SNR_IMC_MMIO_PMON_FIXED_CTR             0x38
404 #define SNR_IMC_MMIO_PMON_CTL0                  0x40
405 #define SNR_IMC_MMIO_PMON_CTR0                  0x8
406 #define SNR_IMC_MMIO_PMON_BOX_CTL               0x22800
407 #define SNR_IMC_MMIO_OFFSET                     0x4000
408 #define SNR_IMC_MMIO_SIZE                       0x4000
409 #define SNR_IMC_MMIO_BASE_OFFSET                0xd0
410 #define SNR_IMC_MMIO_BASE_MASK                  0x1FFFFFFF
411 #define SNR_IMC_MMIO_MEM0_OFFSET                0xd8
412 #define SNR_IMC_MMIO_MEM0_MASK                  0x7FF
413
414 /* ICX CHA */
415 #define ICX_C34_MSR_PMON_CTR0                   0xb68
416 #define ICX_C34_MSR_PMON_CTL0                   0xb61
417 #define ICX_C34_MSR_PMON_BOX_CTL                0xb60
418 #define ICX_C34_MSR_PMON_BOX_FILTER0            0xb65
419
420 /* ICX IIO */
421 #define ICX_IIO_MSR_PMON_CTL0                   0xa58
422 #define ICX_IIO_MSR_PMON_CTR0                   0xa51
423 #define ICX_IIO_MSR_PMON_BOX_CTL                0xa50
424
425 /* ICX IRP */
426 #define ICX_IRP0_MSR_PMON_CTL0                  0xa4d
427 #define ICX_IRP0_MSR_PMON_CTR0                  0xa4b
428 #define ICX_IRP0_MSR_PMON_BOX_CTL               0xa4a
429
430 /* ICX M2PCIE */
431 #define ICX_M2PCIE_MSR_PMON_CTL0                0xa46
432 #define ICX_M2PCIE_MSR_PMON_CTR0                0xa41
433 #define ICX_M2PCIE_MSR_PMON_BOX_CTL             0xa40
434
435 /* ICX UPI */
436 #define ICX_UPI_PCI_PMON_CTL0                   0x350
437 #define ICX_UPI_PCI_PMON_CTR0                   0x320
438 #define ICX_UPI_PCI_PMON_BOX_CTL                0x318
439 #define ICX_UPI_CTL_UMASK_EXT                   0xffffff
440
441 /* ICX M3UPI*/
442 #define ICX_M3UPI_PCI_PMON_CTL0                 0xd8
443 #define ICX_M3UPI_PCI_PMON_CTR0                 0xa8
444 #define ICX_M3UPI_PCI_PMON_BOX_CTL              0xa0
445
446 /* ICX IMC */
447 #define ICX_NUMBER_IMC_CHN                      2
448 #define ICX_IMC_MEM_STRIDE                      0x4
449
450 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
451 DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
452 DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
453 DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
454 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
455 DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
456 DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
457 DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
458 DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
459 DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
460 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
461 DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
462 DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
463 DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
464 DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
465 DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
466 DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
467 DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
468 DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
469 DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
470 DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
471 DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
472 DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
473 DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
474 DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
475 DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
476 DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
477 DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
478 DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
479 DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
480 DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
481 DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
482 DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
483 DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
484 DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
485 DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
486 DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
487 DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
488 DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
489 DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
490 DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
491 DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
492 DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
493 DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
494 DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
495 DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
496 DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
497 DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
498 DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
499 DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
500 DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
501 DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
502 DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
503 DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
504 DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
505 DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
506 DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
507 DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
508 DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
509 DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
510 DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
511 DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
512 DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
513 DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
514 DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
515 DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
516 DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
517 DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
518 DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
519 DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
520 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
521 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
522 DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
523 DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
524 DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
525 DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
526 DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
527 DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
528
529 static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
530 {
531         struct pci_dev *pdev = box->pci_dev;
532         int box_ctl = uncore_pci_box_ctl(box);
533         u32 config = 0;
534
535         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
536                 config |= SNBEP_PMON_BOX_CTL_FRZ;
537                 pci_write_config_dword(pdev, box_ctl, config);
538         }
539 }
540
541 static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
542 {
543         struct pci_dev *pdev = box->pci_dev;
544         int box_ctl = uncore_pci_box_ctl(box);
545         u32 config = 0;
546
547         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
548                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
549                 pci_write_config_dword(pdev, box_ctl, config);
550         }
551 }
552
553 static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
554 {
555         struct pci_dev *pdev = box->pci_dev;
556         struct hw_perf_event *hwc = &event->hw;
557
558         pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
559 }
560
561 static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
562 {
563         struct pci_dev *pdev = box->pci_dev;
564         struct hw_perf_event *hwc = &event->hw;
565
566         pci_write_config_dword(pdev, hwc->config_base, hwc->config);
567 }
568
569 static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
570 {
571         struct pci_dev *pdev = box->pci_dev;
572         struct hw_perf_event *hwc = &event->hw;
573         u64 count = 0;
574
575         pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
576         pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
577
578         return count;
579 }
580
581 static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
582 {
583         struct pci_dev *pdev = box->pci_dev;
584         int box_ctl = uncore_pci_box_ctl(box);
585
586         pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
587 }
588
589 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
590 {
591         u64 config;
592         unsigned msr;
593
594         msr = uncore_msr_box_ctl(box);
595         if (msr) {
596                 rdmsrl(msr, config);
597                 config |= SNBEP_PMON_BOX_CTL_FRZ;
598                 wrmsrl(msr, config);
599         }
600 }
601
602 static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
603 {
604         u64 config;
605         unsigned msr;
606
607         msr = uncore_msr_box_ctl(box);
608         if (msr) {
609                 rdmsrl(msr, config);
610                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
611                 wrmsrl(msr, config);
612         }
613 }
614
615 static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
616 {
617         struct hw_perf_event *hwc = &event->hw;
618         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
619
620         if (reg1->idx != EXTRA_REG_NONE)
621                 wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
622
623         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
624 }
625
626 static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
627                                         struct perf_event *event)
628 {
629         struct hw_perf_event *hwc = &event->hw;
630
631         wrmsrl(hwc->config_base, hwc->config);
632 }
633
634 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
635 {
636         unsigned msr = uncore_msr_box_ctl(box);
637
638         if (msr)
639                 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
640 }
641
/*
 * Generic SNB-EP PMON sysfs format attributes: 8-bit event select,
 * 8-bit umask, edge detect, invert, and an 8-bit threshold.
 */
static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
650
/*
 * Ubox format attributes: like the generic set, but the Ubox threshold
 * field is only 5 bits wide (matches SNBEP_U_MSR_PMON_CTL_TRESH_MASK).
 */
static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};
659
/*
 * Cbox format attributes: generic fields plus tid_en and the box filter
 * fields (tid/nid/state/opc) exposed via config1.
 */
static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};
673
674 static struct attribute *snbep_uncore_pcu_formats_attr[] = {
675         &format_attr_event.attr,
676         &format_attr_occ_sel.attr,
677         &format_attr_edge.attr,
678         &format_attr_inv.attr,
679         &format_attr_thresh5.attr,
680         &format_attr_occ_invert.attr,
681         &format_attr_occ_edge.attr,
682         &format_attr_filter_band0.attr,
683         &format_attr_filter_band1.attr,
684         &format_attr_filter_band2.attr,
685         &format_attr_filter_band3.attr,
686         NULL,
687 };
688
689 static struct attribute *snbep_uncore_qpi_formats_attr[] = {
690         &format_attr_event_ext.attr,
691         &format_attr_umask.attr,
692         &format_attr_edge.attr,
693         &format_attr_inv.attr,
694         &format_attr_thresh8.attr,
695         &format_attr_match_rds.attr,
696         &format_attr_match_rnid30.attr,
697         &format_attr_match_rnid4.attr,
698         &format_attr_match_dnid.attr,
699         &format_attr_match_mc.attr,
700         &format_attr_match_opc.attr,
701         &format_attr_match_vnw.attr,
702         &format_attr_match0.attr,
703         &format_attr_match1.attr,
704         &format_attr_mask_rds.attr,
705         &format_attr_mask_rnid30.attr,
706         &format_attr_mask_rnid4.attr,
707         &format_attr_mask_dnid.attr,
708         &format_attr_mask_mc.attr,
709         &format_attr_mask_opc.attr,
710         &format_attr_mask_vnw.attr,
711         &format_attr_mask0.attr,
712         &format_attr_mask1.attr,
713         NULL,
714 };
715
716 static struct uncore_event_desc snbep_uncore_imc_events[] = {
717         INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
718         INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
719         INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
720         INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
721         INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
722         INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
723         INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
724         { /* end: all zeroes */ },
725 };
726
727 static struct uncore_event_desc snbep_uncore_qpi_events[] = {
728         INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
729         INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
730         INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
731         INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
732         { /* end: all zeroes */ },
733 };
734
735 static const struct attribute_group snbep_uncore_format_group = {
736         .name = "format",
737         .attrs = snbep_uncore_formats_attr,
738 };
739
740 static const struct attribute_group snbep_uncore_ubox_format_group = {
741         .name = "format",
742         .attrs = snbep_uncore_ubox_formats_attr,
743 };
744
745 static const struct attribute_group snbep_uncore_cbox_format_group = {
746         .name = "format",
747         .attrs = snbep_uncore_cbox_formats_attr,
748 };
749
750 static const struct attribute_group snbep_uncore_pcu_format_group = {
751         .name = "format",
752         .attrs = snbep_uncore_pcu_formats_attr,
753 };
754
755 static const struct attribute_group snbep_uncore_qpi_format_group = {
756         .name = "format",
757         .attrs = snbep_uncore_qpi_formats_attr,
758 };
759
760 #define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()                    \
761         .disable_box    = snbep_uncore_msr_disable_box,         \
762         .enable_box     = snbep_uncore_msr_enable_box,          \
763         .disable_event  = snbep_uncore_msr_disable_event,       \
764         .enable_event   = snbep_uncore_msr_enable_event,        \
765         .read_counter   = uncore_msr_read_counter
766
767 #define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()                      \
768         __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),                   \
769         .init_box       = snbep_uncore_msr_init_box             \
770
771 static struct intel_uncore_ops snbep_uncore_msr_ops = {
772         SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
773 };
774
775 #define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()                      \
776         .init_box       = snbep_uncore_pci_init_box,            \
777         .disable_box    = snbep_uncore_pci_disable_box,         \
778         .enable_box     = snbep_uncore_pci_enable_box,          \
779         .disable_event  = snbep_uncore_pci_disable_event,       \
780         .read_counter   = snbep_uncore_pci_read_counter
781
782 static struct intel_uncore_ops snbep_uncore_pci_ops = {
783         SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
784         .enable_event   = snbep_uncore_pci_enable_event,        \
785 };
786
787 static struct event_constraint snbep_uncore_cbox_constraints[] = {
788         UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
789         UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
790         UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
791         UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
792         UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
793         UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
794         UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
795         UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
796         UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
797         UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
798         UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
799         UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
800         UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
801         UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
802         UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
803         UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
804         UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
805         UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
806         UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
807         UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
808         UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
809         UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
810         UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
811         UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
812         UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
813         UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
814         EVENT_CONSTRAINT_END
815 };
816
817 static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
818         UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
819         UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
820         UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
821         UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
822         UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
823         UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
824         UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
825         UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
826         UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
827         UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
828         EVENT_CONSTRAINT_END
829 };
830
831 static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
832         UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
833         UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
834         UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
835         UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
836         UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
837         UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
838         UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
839         UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
840         UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
841         UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
842         UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
843         UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
844         UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
845         UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
846         UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
847         UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
848         UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
849         UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
850         UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
851         UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
852         UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
853         UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
854         UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
855         UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
856         UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
857         UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
858         UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
859         UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
860         EVENT_CONSTRAINT_END
861 };
862
863 static struct intel_uncore_type snbep_uncore_ubox = {
864         .name           = "ubox",
865         .num_counters   = 2,
866         .num_boxes      = 1,
867         .perf_ctr_bits  = 44,
868         .fixed_ctr_bits = 48,
869         .perf_ctr       = SNBEP_U_MSR_PMON_CTR0,
870         .event_ctl      = SNBEP_U_MSR_PMON_CTL0,
871         .event_mask     = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
872         .fixed_ctr      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
873         .fixed_ctl      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
874         .ops            = &snbep_uncore_msr_ops,
875         .format_group   = &snbep_uncore_ubox_format_group,
876 };
877
878 static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
879         SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
880                                   SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
881         SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
882         SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
883         SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
884         SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
885         SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
886         SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
887         SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
888         SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
889         SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
890         SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
891         SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
892         SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
893         SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
894         SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
895         SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
896         SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
897         SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
898         SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
899         SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
900         SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
901         SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
902         SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
903         SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
904         EVENT_EXTRA_END
905 };
906
907 static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
908 {
909         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
910         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
911         int i;
912
913         if (uncore_box_is_fake(box))
914                 return;
915
916         for (i = 0; i < 5; i++) {
917                 if (reg1->alloc & (0x1 << i))
918                         atomic_sub(1 << (i * 6), &er->ref);
919         }
920         reg1->alloc = 0;
921 }
922
923 static struct event_constraint *
924 __snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
925                             u64 (*cbox_filter_mask)(int fields))
926 {
927         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
928         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
929         int i, alloc = 0;
930         unsigned long flags;
931         u64 mask;
932
933         if (reg1->idx == EXTRA_REG_NONE)
934                 return NULL;
935
936         raw_spin_lock_irqsave(&er->lock, flags);
937         for (i = 0; i < 5; i++) {
938                 if (!(reg1->idx & (0x1 << i)))
939                         continue;
940                 if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
941                         continue;
942
943                 mask = cbox_filter_mask(0x1 << i);
944                 if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
945                     !((reg1->config ^ er->config) & mask)) {
946                         atomic_add(1 << (i * 6), &er->ref);
947                         er->config &= ~mask;
948                         er->config |= reg1->config & mask;
949                         alloc |= (0x1 << i);
950                 } else {
951                         break;
952                 }
953         }
954         raw_spin_unlock_irqrestore(&er->lock, flags);
955         if (i < 5)
956                 goto fail;
957
958         if (!uncore_box_is_fake(box))
959                 reg1->alloc |= alloc;
960
961         return NULL;
962 fail:
963         for (; i >= 0; i--) {
964                 if (alloc & (0x1 << i))
965                         atomic_sub(1 << (i * 6), &er->ref);
966         }
967         return &uncore_constraint_empty;
968 }
969
970 static u64 snbep_cbox_filter_mask(int fields)
971 {
972         u64 mask = 0;
973
974         if (fields & 0x1)
975                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
976         if (fields & 0x2)
977                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
978         if (fields & 0x4)
979                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
980         if (fields & 0x8)
981                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
982
983         return mask;
984 }
985
986 static struct event_constraint *
987 snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
988 {
989         return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
990 }
991
992 static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
993 {
994         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
995         struct extra_reg *er;
996         int idx = 0;
997
998         for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
999                 if (er->event != (event->hw.config & er->config_mask))
1000                         continue;
1001                 idx |= er->idx;
1002         }
1003
1004         if (idx) {
1005                 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1006                         SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1007                 reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
1008                 reg1->idx = idx;
1009         }
1010         return 0;
1011 }
1012
1013 static struct intel_uncore_ops snbep_uncore_cbox_ops = {
1014         SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1015         .hw_config              = snbep_cbox_hw_config,
1016         .get_constraint         = snbep_cbox_get_constraint,
1017         .put_constraint         = snbep_cbox_put_constraint,
1018 };
1019
1020 static struct intel_uncore_type snbep_uncore_cbox = {
1021         .name                   = "cbox",
1022         .num_counters           = 4,
1023         .num_boxes              = 8,
1024         .perf_ctr_bits          = 44,
1025         .event_ctl              = SNBEP_C0_MSR_PMON_CTL0,
1026         .perf_ctr               = SNBEP_C0_MSR_PMON_CTR0,
1027         .event_mask             = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
1028         .box_ctl                = SNBEP_C0_MSR_PMON_BOX_CTL,
1029         .msr_offset             = SNBEP_CBO_MSR_OFFSET,
1030         .num_shared_regs        = 1,
1031         .constraints            = snbep_uncore_cbox_constraints,
1032         .ops                    = &snbep_uncore_cbox_ops,
1033         .format_group           = &snbep_uncore_cbox_format_group,
1034 };
1035
1036 static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
1037 {
1038         struct hw_perf_event *hwc = &event->hw;
1039         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1040         u64 config = reg1->config;
1041
1042         if (new_idx > reg1->idx)
1043                 config <<= 8 * (new_idx - reg1->idx);
1044         else
1045                 config >>= 8 * (reg1->idx - new_idx);
1046
1047         if (modify) {
1048                 hwc->config += new_idx - reg1->idx;
1049                 reg1->config = config;
1050                 reg1->idx = new_idx;
1051         }
1052         return config;
1053 }
1054
1055 static struct event_constraint *
1056 snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1057 {
1058         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1059         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
1060         unsigned long flags;
1061         int idx = reg1->idx;
1062         u64 mask, config1 = reg1->config;
1063         bool ok = false;
1064
1065         if (reg1->idx == EXTRA_REG_NONE ||
1066             (!uncore_box_is_fake(box) && reg1->alloc))
1067                 return NULL;
1068 again:
1069         mask = 0xffULL << (idx * 8);
1070         raw_spin_lock_irqsave(&er->lock, flags);
1071         if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
1072             !((config1 ^ er->config) & mask)) {
1073                 atomic_add(1 << (idx * 8), &er->ref);
1074                 er->config &= ~mask;
1075                 er->config |= config1 & mask;
1076                 ok = true;
1077         }
1078         raw_spin_unlock_irqrestore(&er->lock, flags);
1079
1080         if (!ok) {
1081                 idx = (idx + 1) % 4;
1082                 if (idx != reg1->idx) {
1083                         config1 = snbep_pcu_alter_er(event, idx, false);
1084                         goto again;
1085                 }
1086                 return &uncore_constraint_empty;
1087         }
1088
1089         if (!uncore_box_is_fake(box)) {
1090                 if (idx != reg1->idx)
1091                         snbep_pcu_alter_er(event, idx, true);
1092                 reg1->alloc = 1;
1093         }
1094         return NULL;
1095 }
1096
1097 static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
1098 {
1099         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1100         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
1101
1102         if (uncore_box_is_fake(box) || !reg1->alloc)
1103                 return;
1104
1105         atomic_sub(1 << (reg1->idx * 8), &er->ref);
1106         reg1->alloc = 0;
1107 }
1108
1109 static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1110 {
1111         struct hw_perf_event *hwc = &event->hw;
1112         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1113         int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
1114
1115         if (ev_sel >= 0xb && ev_sel <= 0xe) {
1116                 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
1117                 reg1->idx = ev_sel - 0xb;
1118                 reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
1119         }
1120         return 0;
1121 }
1122
1123 static struct intel_uncore_ops snbep_uncore_pcu_ops = {
1124         SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1125         .hw_config              = snbep_pcu_hw_config,
1126         .get_constraint         = snbep_pcu_get_constraint,
1127         .put_constraint         = snbep_pcu_put_constraint,
1128 };
1129
1130 static struct intel_uncore_type snbep_uncore_pcu = {
1131         .name                   = "pcu",
1132         .num_counters           = 4,
1133         .num_boxes              = 1,
1134         .perf_ctr_bits          = 48,
1135         .perf_ctr               = SNBEP_PCU_MSR_PMON_CTR0,
1136         .event_ctl              = SNBEP_PCU_MSR_PMON_CTL0,
1137         .event_mask             = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
1138         .box_ctl                = SNBEP_PCU_MSR_PMON_BOX_CTL,
1139         .num_shared_regs        = 1,
1140         .ops                    = &snbep_uncore_pcu_ops,
1141         .format_group           = &snbep_uncore_pcu_format_group,
1142 };
1143
1144 static struct intel_uncore_type *snbep_msr_uncores[] = {
1145         &snbep_uncore_ubox,
1146         &snbep_uncore_cbox,
1147         &snbep_uncore_pcu,
1148         NULL,
1149 };
1150
1151 void snbep_uncore_cpu_init(void)
1152 {
1153         if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1154                 snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1155         uncore_msr_uncores = snbep_msr_uncores;
1156 }
1157
/*
 * Indices into uncore_extra_pci_dev[die].dev[] for companion PCI
 * devices (QPI port filters etc.) looked up at event-enable time.
 */
enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	BDX_PCI_QPI_PORT2_FILTER,
	HSWEP_PCI_PCU_3,
};
1164
1165 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1166 {
1167         struct hw_perf_event *hwc = &event->hw;
1168         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1169         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1170
1171         if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
1172                 reg1->idx = 0;
1173                 reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
1174                 reg1->config = event->attr.config1;
1175                 reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
1176                 reg2->config = event->attr.config2;
1177         }
1178         return 0;
1179 }
1180
1181 static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1182 {
1183         struct pci_dev *pdev = box->pci_dev;
1184         struct hw_perf_event *hwc = &event->hw;
1185         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1186         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1187
1188         if (reg1->idx != EXTRA_REG_NONE) {
1189                 int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
1190                 int die = box->dieid;
1191                 struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];
1192
1193                 if (filter_pdev) {
1194                         pci_write_config_dword(filter_pdev, reg1->reg,
1195                                                 (u32)reg1->config);
1196                         pci_write_config_dword(filter_pdev, reg1->reg + 4,
1197                                                 (u32)(reg1->config >> 32));
1198                         pci_write_config_dword(filter_pdev, reg2->reg,
1199                                                 (u32)reg2->config);
1200                         pci_write_config_dword(filter_pdev, reg2->reg + 4,
1201                                                 (u32)(reg2->config >> 32));
1202                 }
1203         }
1204
1205         pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1206 }
1207
1208 static struct intel_uncore_ops snbep_uncore_qpi_ops = {
1209         SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
1210         .enable_event           = snbep_qpi_enable_event,
1211         .hw_config              = snbep_qpi_hw_config,
1212         .get_constraint         = uncore_get_constraint,
1213         .put_constraint         = uncore_put_constraint,
1214 };
1215
1216 #define SNBEP_UNCORE_PCI_COMMON_INIT()                          \
1217         .perf_ctr       = SNBEP_PCI_PMON_CTR0,                  \
1218         .event_ctl      = SNBEP_PCI_PMON_CTL0,                  \
1219         .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,            \
1220         .box_ctl        = SNBEP_PCI_PMON_BOX_CTL,               \
1221         .ops            = &snbep_uncore_pci_ops,                \
1222         .format_group   = &snbep_uncore_format_group
1223
1224 static struct intel_uncore_type snbep_uncore_ha = {
1225         .name           = "ha",
1226         .num_counters   = 4,
1227         .num_boxes      = 1,
1228         .perf_ctr_bits  = 48,
1229         SNBEP_UNCORE_PCI_COMMON_INIT(),
1230 };
1231
1232 static struct intel_uncore_type snbep_uncore_imc = {
1233         .name           = "imc",
1234         .num_counters   = 4,
1235         .num_boxes      = 4,
1236         .perf_ctr_bits  = 48,
1237         .fixed_ctr_bits = 48,
1238         .fixed_ctr      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1239         .fixed_ctl      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
1240         .event_descs    = snbep_uncore_imc_events,
1241         SNBEP_UNCORE_PCI_COMMON_INIT(),
1242 };
1243
1244 static struct intel_uncore_type snbep_uncore_qpi = {
1245         .name                   = "qpi",
1246         .num_counters           = 4,
1247         .num_boxes              = 2,
1248         .perf_ctr_bits          = 48,
1249         .perf_ctr               = SNBEP_PCI_PMON_CTR0,
1250         .event_ctl              = SNBEP_PCI_PMON_CTL0,
1251         .event_mask             = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
1252         .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
1253         .num_shared_regs        = 1,
1254         .ops                    = &snbep_uncore_qpi_ops,
1255         .event_descs            = snbep_uncore_qpi_events,
1256         .format_group           = &snbep_uncore_qpi_format_group,
1257 };
1258
1259
1260 static struct intel_uncore_type snbep_uncore_r2pcie = {
1261         .name           = "r2pcie",
1262         .num_counters   = 4,
1263         .num_boxes      = 1,
1264         .perf_ctr_bits  = 44,
1265         .constraints    = snbep_uncore_r2pcie_constraints,
1266         SNBEP_UNCORE_PCI_COMMON_INIT(),
1267 };
1268
1269 static struct intel_uncore_type snbep_uncore_r3qpi = {
1270         .name           = "r3qpi",
1271         .num_counters   = 3,
1272         .num_boxes      = 2,
1273         .perf_ctr_bits  = 44,
1274         .constraints    = snbep_uncore_r3qpi_constraints,
1275         SNBEP_UNCORE_PCI_COMMON_INIT(),
1276 };
1277
1278 enum {
1279         SNBEP_PCI_UNCORE_HA,
1280         SNBEP_PCI_UNCORE_IMC,
1281         SNBEP_PCI_UNCORE_QPI,
1282         SNBEP_PCI_UNCORE_R2PCIE,
1283         SNBEP_PCI_UNCORE_R3QPI,
1284 };
1285
1286 static struct intel_uncore_type *snbep_pci_uncores[] = {
1287         [SNBEP_PCI_UNCORE_HA]           = &snbep_uncore_ha,
1288         [SNBEP_PCI_UNCORE_IMC]          = &snbep_uncore_imc,
1289         [SNBEP_PCI_UNCORE_QPI]          = &snbep_uncore_qpi,
1290         [SNBEP_PCI_UNCORE_R2PCIE]       = &snbep_uncore_r2pcie,
1291         [SNBEP_PCI_UNCORE_R3QPI]        = &snbep_uncore_r3qpi,
1292         NULL,
1293 };
1294
1295 static const struct pci_device_id snbep_uncore_pci_ids[] = {
1296         { /* Home Agent */
1297                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
1298                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
1299         },
1300         { /* MC Channel 0 */
1301                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
1302                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
1303         },
1304         { /* MC Channel 1 */
1305                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
1306                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
1307         },
1308         { /* MC Channel 2 */
1309                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
1310                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
1311         },
1312         { /* MC Channel 3 */
1313                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
1314                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
1315         },
1316         { /* QPI Port 0 */
1317                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
1318                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
1319         },
1320         { /* QPI Port 1 */
1321                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
1322                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
1323         },
1324         { /* R2PCIe */
1325                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
1326                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
1327         },
1328         { /* R3QPI Link 0 */
1329                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
1330                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
1331         },
1332         { /* R3QPI Link 1 */
1333                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
1334                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
1335         },
1336         { /* QPI Port 0 filter  */
1337                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
1338                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1339                                                    SNBEP_PCI_QPI_PORT0_FILTER),
1340         },
1341         { /* QPI Port 0 filter  */
1342                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
1343                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1344                                                    SNBEP_PCI_QPI_PORT1_FILTER),
1345         },
1346         { /* end: all zeroes */ }
1347 };
1348
1349 static struct pci_driver snbep_uncore_pci_driver = {
1350         .name           = "snbep_uncore",
1351         .id_table       = snbep_uncore_pci_ids,
1352 };
1353
#define NODE_ID_MASK	0x7	/* node id: low 3 bits of SNBEP_CPUNODEID */
1355
1356 /*
1357  * build pci bus to socket mapping
1358  */
1359 static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
1360 {
1361         struct pci_dev *ubox_dev = NULL;
1362         int i, bus, nodeid, segment, die_id;
1363         struct pci2phy_map *map;
1364         int err = 0;
1365         u32 config = 0;
1366
1367         while (1) {
1368                 /* find the UBOX device */
1369                 ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
1370                 if (!ubox_dev)
1371                         break;
1372                 bus = ubox_dev->bus->number;
1373                 /*
1374                  * The nodeid and idmap registers only contain enough
1375                  * information to handle 8 nodes.  On systems with more
1376                  * than 8 nodes, we need to rely on NUMA information,
1377                  * filled in from BIOS supplied information, to determine
1378                  * the topology.
1379                  */
1380                 if (nr_node_ids <= 8) {
1381                         /* get the Node ID of the local register */
1382                         err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
1383                         if (err)
1384                                 break;
1385                         nodeid = config & NODE_ID_MASK;
1386                         /* get the Node ID mapping */
1387                         err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
1388                         if (err)
1389                                 break;
1390
1391                         segment = pci_domain_nr(ubox_dev->bus);
1392                         raw_spin_lock(&pci2phy_map_lock);
1393                         map = __find_pci2phy_map(segment);
1394                         if (!map) {
1395                                 raw_spin_unlock(&pci2phy_map_lock);
1396                                 err = -ENOMEM;
1397                                 break;
1398                         }
1399
1400                         /*
1401                          * every three bits in the Node ID mapping register maps
1402                          * to a particular node.
1403                          */
1404                         for (i = 0; i < 8; i++) {
1405                                 if (nodeid == ((config >> (3 * i)) & 0x7)) {
1406                                         if (topology_max_die_per_package() > 1)
1407                                                 die_id = i;
1408                                         else
1409                                                 die_id = topology_phys_to_logical_pkg(i);
1410                                         map->pbus_to_dieid[bus] = die_id;
1411                                         break;
1412                                 }
1413                         }
1414                         raw_spin_unlock(&pci2phy_map_lock);
1415                 } else {
1416                         int node = pcibus_to_node(ubox_dev->bus);
1417                         int cpu;
1418
1419                         segment = pci_domain_nr(ubox_dev->bus);
1420                         raw_spin_lock(&pci2phy_map_lock);
1421                         map = __find_pci2phy_map(segment);
1422                         if (!map) {
1423                                 raw_spin_unlock(&pci2phy_map_lock);
1424                                 err = -ENOMEM;
1425                                 break;
1426                         }
1427
1428                         die_id = -1;
1429                         for_each_cpu(cpu, cpumask_of_pcibus(ubox_dev->bus)) {
1430                                 struct cpuinfo_x86 *c = &cpu_data(cpu);
1431
1432                                 if (c->initialized && cpu_to_node(cpu) == node) {
1433                                         map->pbus_to_dieid[bus] = die_id = c->logical_die_id;
1434                                         break;
1435                                 }
1436                         }
1437                         raw_spin_unlock(&pci2phy_map_lock);
1438
1439                         if (WARN_ON_ONCE(die_id == -1)) {
1440                                 err = -EINVAL;
1441                                 break;
1442                         }
1443                 }
1444         }
1445
1446         if (!err) {
1447                 /*
1448                  * For PCI bus with no UBOX device, find the next bus
1449                  * that has UBOX device and use its mapping.
1450                  */
1451                 raw_spin_lock(&pci2phy_map_lock);
1452                 list_for_each_entry(map, &pci2phy_map_head, list) {
1453                         i = -1;
1454                         if (reverse) {
1455                                 for (bus = 255; bus >= 0; bus--) {
1456                                         if (map->pbus_to_dieid[bus] >= 0)
1457                                                 i = map->pbus_to_dieid[bus];
1458                                         else
1459                                                 map->pbus_to_dieid[bus] = i;
1460                                 }
1461                         } else {
1462                                 for (bus = 0; bus <= 255; bus++) {
1463                                         if (map->pbus_to_dieid[bus] >= 0)
1464                                                 i = map->pbus_to_dieid[bus];
1465                                         else
1466                                                 map->pbus_to_dieid[bus] = i;
1467                                 }
1468                         }
1469                 }
1470                 raw_spin_unlock(&pci2phy_map_lock);
1471         }
1472
1473         pci_dev_put(ubox_dev);
1474
1475         return err ? pcibios_err_to_errno(err) : 0;
1476 }
1477
1478 int snbep_uncore_pci_init(void)
1479 {
1480         int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1481         if (ret)
1482                 return ret;
1483         uncore_pci_uncores = snbep_pci_uncores;
1484         uncore_pci_driver = &snbep_uncore_pci_driver;
1485         return 0;
1486 }
1487 /* end of Sandy Bridge-EP uncore support */
1488
1489 /* IvyTown uncore support */
1490 static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
1491 {
1492         unsigned msr = uncore_msr_box_ctl(box);
1493         if (msr)
1494                 wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
1495 }
1496
1497 static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
1498 {
1499         struct pci_dev *pdev = box->pci_dev;
1500
1501         pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
1502 }
1503
/*
 * Common MSR box ops for IVT: IVT-specific box init combined with the
 * SNB-EP enable/disable/read callbacks, which IVT reuses unchanged.
 */
#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= ivbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter
1511
/* Default ops for IVT MSR-based boxes. */
static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

/* Default ops for IVT PCI-based boxes; only box init differs from SNB-EP. */
static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
1524
/*
 * Common fields for IVT PCI uncore types: SNB-EP register layout,
 * combined with the IVT event mask, ops and format group.
 */
#define IVBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,		\
	.format_group	= &ivbep_uncore_format_group
1532
/* Generic IVT PMON format: event, umask, edge, invert, 8-bit threshold. */
static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

/* Ubox format: same as generic but with a 5-bit threshold field. */
static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

/* C-Box format: tid_en plus the box filter fields (config1). */
static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

/* PCU format: occupancy select/edge/invert and four band filters. */
static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

/* QPI format: extended event select plus match/mask filter fields. */
static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
1607
/* sysfs "format" groups wrapping the per-box-type attribute arrays. */
static const struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static const struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static const struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static const struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static const struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};
1632
/*
 * IVT Ubox PMON (MSR-based): two general-purpose counters plus a fixed
 * counter (UCLK, per the register names).
 */
static struct intel_uncore_type ivbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_ubox_format_group,
};
1647
/*
 * C-Box event encodings that require the box filter register.  The idx
 * field is a bitmask of required filter fields, decoded by
 * ivbep_cbox_filter_mask() in ivbep_cbox_hw_config().
 */
static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	EVENT_EXTRA_END
};
1688
1689 static u64 ivbep_cbox_filter_mask(int fields)
1690 {
1691         u64 mask = 0;
1692
1693         if (fields & 0x1)
1694                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1695         if (fields & 0x2)
1696                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1697         if (fields & 0x4)
1698                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1699         if (fields & 0x8)
1700                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1701         if (fields & 0x10) {
1702                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1703                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1704                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1705                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1706         }
1707
1708         return mask;
1709 }
1710
/* Constraint lookup: common SNB-EP C-Box logic with the IVT filter mask. */
static struct event_constraint *
ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
}
1716
1717 static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1718 {
1719         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1720         struct extra_reg *er;
1721         int idx = 0;
1722
1723         for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
1724                 if (er->event != (event->hw.config & er->config_mask))
1725                         continue;
1726                 idx |= er->idx;
1727         }
1728
1729         if (idx) {
1730                 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1731                         SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1732                 reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
1733                 reg1->idx = idx;
1734         }
1735         return 0;
1736 }
1737
/*
 * Enable a C-Box event, first programming the box filter register when
 * the event uses one (reg1->idx set by ivbep_cbox_hw_config()).
 */
static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		/*
		 * The 64-bit filter value spans two MSRs; the high half is
		 * written at reg + 6.  NOTE(review): presumably the second
		 * filter MSR sits 6 addresses above the first on IVT —
		 * confirm against the IVT uncore MSR layout.
		 */
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 6, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1751
/* C-Box ops: SNB-EP handlers plus IVT filter-aware config/enable/constraints. */
static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= ivbep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= ivbep_cbox_hw_config,
	.get_constraint		= ivbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/*
 * IVT C-Box PMON.  num_boxes is the maximum (15); ivbep_uncore_cpu_init()
 * trims it down to the actual core count at boot.
 */
static struct intel_uncore_type ivbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 15,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,	/* the box filter register */
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &ivbep_uncore_cbox_ops,
	.format_group		= &ivbep_uncore_cbox_format_group,
};
1779
/* PCU ops: common IVT MSR ops plus the SNB-EP PCU config/constraint handlers. */
static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* IVT PCU PMON (MSR-based); one shared reg for the band filters. */
static struct intel_uncore_type ivbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_pcu_ops,
	.format_group		= &ivbep_uncore_pcu_format_group,
};
1800
/* NULL-terminated list of all MSR-based IVT uncore types. */
static struct intel_uncore_type *ivbep_msr_uncores[] = {
	&ivbep_uncore_ubox,
	&ivbep_uncore_cbox,
	&ivbep_uncore_pcu,
	NULL,
};
1807
1808 void ivbep_uncore_cpu_init(void)
1809 {
1810         if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1811                 ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1812         uncore_msr_uncores = ivbep_msr_uncores;
1813 }
1814
/* IVT Home Agent PMON (PCI), two HA boxes. */
static struct intel_uncore_type ivbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* IVT IMC channel PMON (PCI); reuses the SNB-EP IMC event descriptions. */
static struct intel_uncore_type ivbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1834
1835 /* registers in IRP boxes are not properly aligned */
static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};	/* per-counter control regs */
static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};	/* per-counter counter regs */
1838
1839 static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1840 {
1841         struct pci_dev *pdev = box->pci_dev;
1842         struct hw_perf_event *hwc = &event->hw;
1843
1844         pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
1845                                hwc->config | SNBEP_PMON_CTL_EN);
1846 }
1847
1848 static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1849 {
1850         struct pci_dev *pdev = box->pci_dev;
1851         struct hw_perf_event *hwc = &event->hw;
1852
1853         pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
1854 }
1855
1856 static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1857 {
1858         struct pci_dev *pdev = box->pci_dev;
1859         struct hw_perf_event *hwc = &event->hw;
1860         u64 count = 0;
1861
1862         pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1863         pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1864
1865         return count;
1866 }
1867
/* IRP ops: table-driven event control/read due to the irregular layout. */
static struct intel_uncore_ops ivbep_uncore_irp_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= ivbep_uncore_irp_read_counter,
};

/*
 * IVT IRP PMON (PCI).  No .perf_ctr/.event_ctl here: register addresses
 * come from the ivbep_uncore_irp_ctls/ctrs tables via the ops above.
 */
static struct intel_uncore_type ivbep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= IVBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &ivbep_uncore_irp_ops,
	.format_group		= &ivbep_uncore_format_group,
};
1887
/* QPI ops: SNB-EP handlers including match/mask extra-register management. */
static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_qpi_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
	.hw_config	= snbep_qpi_hw_config,
	.get_constraint	= uncore_get_constraint,
	.put_constraint	= uncore_put_constraint,
};

/* IVT QPI port PMON (PCI), three ports. */
static struct intel_uncore_type ivbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_qpi_ops,
	.format_group		= &ivbep_uncore_qpi_format_group,
};
1913
/* IVT R2PCIe PMON (PCI); counter placement restricted by constraints. */
static struct intel_uncore_type ivbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* IVT R3QPI PMON (PCI), two boxes; shares the SNB-EP constraint table. */
static struct intel_uncore_type ivbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1931
/* Indices into ivbep_pci_uncores[], used as driver_data in the PCI id table. */
enum {
	IVBEP_PCI_UNCORE_HA,
	IVBEP_PCI_UNCORE_IMC,
	IVBEP_PCI_UNCORE_IRP,
	IVBEP_PCI_UNCORE_QPI,
	IVBEP_PCI_UNCORE_R2PCIE,
	IVBEP_PCI_UNCORE_R3QPI,
};

/* NULL-terminated table of IVT PCI uncore types, indexed by the enum above. */
static struct intel_uncore_type *ivbep_pci_uncores[] = {
	[IVBEP_PCI_UNCORE_HA]	= &ivbep_uncore_ha,
	[IVBEP_PCI_UNCORE_IMC]	= &ivbep_uncore_imc,
	[IVBEP_PCI_UNCORE_IRP]	= &ivbep_uncore_irp,
	[IVBEP_PCI_UNCORE_QPI]	= &ivbep_uncore_qpi,
	[IVBEP_PCI_UNCORE_R2PCIE]	= &ivbep_uncore_r2pcie,
	[IVBEP_PCI_UNCORE_R3QPI]	= &ivbep_uncore_r3qpi,
	NULL,
};
1950
/*
 * PCI ids of all IVT uncore PMON devices.  driver_data packs the uncore
 * type index and box number; the trailing entries are extra (non-PMON)
 * devices holding the QPI port filters.
 */
static const struct pci_device_id ivbep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
2036
/* Registered via uncore_pci_driver in ivbep_uncore_pci_init(). */
static struct pci_driver ivbep_uncore_pci_driver = {
	.name		= "ivbep_uncore",
	.id_table	= ivbep_uncore_pci_ids,
};
2041
2042 int ivbep_uncore_pci_init(void)
2043 {
2044         int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
2045         if (ret)
2046                 return ret;
2047         uncore_pci_uncores = ivbep_pci_uncores;
2048         uncore_pci_driver = &ivbep_uncore_pci_driver;
2049         return 0;
2050 }
2051 /* end of IvyTown uncore support */
2052
2053 /* KNL uncore support */
/* KNL Ubox format: includes tid_en, unlike the IVT Ubox format. */
static struct attribute *knl_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static const struct attribute_group knl_uncore_ubox_format_group = {
	.name = "format",
	.attrs = knl_uncore_ubox_formats_attr,
};
2068
/* KNL Ubox PMON: HSW-EP register layout with a KNL-specific event mask. */
static struct intel_uncore_type knl_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= KNL_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_ubox_format_group,
};
2083
/* KNL CHA format: qor plus the CHA filter fields (config1). */
static struct attribute *knl_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_link3.attr,
	&format_attr_filter_state4.attr,
	&format_attr_filter_local.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_nnm.attr,
	&format_attr_filter_opc3.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group knl_uncore_cha_format_group = {
	.name = "format",
	.attrs = knl_uncore_cha_formats_attr,
};
2108
/* CHA events restricted to a single counter (counter mask 0x1). */
static struct event_constraint knl_uncore_cha_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};

/*
 * CHA event encodings that require the filter register; idx is a bitmask
 * of required filter fields, decoded by knl_cha_filter_mask().
 */
static struct extra_reg knl_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
	EVENT_EXTRA_END
};
2124
2125 static u64 knl_cha_filter_mask(int fields)
2126 {
2127         u64 mask = 0;
2128
2129         if (fields & 0x1)
2130                 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
2131         if (fields & 0x2)
2132                 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
2133         if (fields & 0x4)
2134                 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
2135         return mask;
2136 }
2137
/* Constraint lookup: common SNB-EP C-Box logic with the KNL CHA filter mask. */
static struct event_constraint *
knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
}
2143
2144 static int knl_cha_hw_config(struct intel_uncore_box *box,
2145                              struct perf_event *event)
2146 {
2147         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2148         struct extra_reg *er;
2149         int idx = 0;
2150
2151         for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
2152                 if (er->event != (event->hw.config & er->config_mask))
2153                         continue;
2154                 idx |= er->idx;
2155         }
2156
2157         if (idx) {
2158                 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2159                             KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
2160                 reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);
2161
2162                 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
2163                 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
2164                 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
2165                 reg1->idx = idx;
2166         }
2167         return 0;
2168 }
2169
/* Defined in the Haswell-EP section below; KNL's CHA reuses it unchanged. */
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				    struct perf_event *event);
2172
/* KNL CHA ops: SNB-EP MSR box handling plus KNL-specific config/constraints */
static struct intel_uncore_ops knl_uncore_cha_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= knl_cha_hw_config,
	.get_constraint		= knl_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
2184
/* KNL CHA PMU: 38 boxes, 4 counters each, 48-bit counters, one shared filter */
static struct intel_uncore_type knl_uncore_cha = {
	.name			= "cha",
	.num_counters		= 4,
	.num_boxes		= 38,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= KNL_CHA_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= knl_uncore_cha_constraints,
	.ops			= &knl_uncore_cha_ops,
	.format_group		= &knl_uncore_cha_format_group,
};
2200
/* Event-encoding fields exposed in sysfs for the KNL PCU PMU */
static struct attribute *knl_uncore_pcu_formats_attr[] = {
	&format_attr_event2.attr,
	&format_attr_use_occ_ctr.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh6.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	NULL,
};
2213
/* sysfs "format" directory for the KNL PCU PMU */
static const struct attribute_group knl_uncore_pcu_format_group = {
	.name = "format",
	.attrs = knl_uncore_pcu_formats_attr,
};
2218
/* KNL power control unit PMU: one box, 4 counters, plain SNB-EP MSR ops */
static struct intel_uncore_type knl_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_pcu_format_group,
};
2231
/* NULL-terminated list of all MSR-based KNL uncore PMU types */
static struct intel_uncore_type *knl_msr_uncores[] = {
	&knl_uncore_ubox,
	&knl_uncore_cha,
	&knl_uncore_pcu,
	NULL,
};
2238
/* Register the KNL MSR-based uncore PMU types with the core uncore driver */
void knl_uncore_cpu_init(void)
{
	uncore_msr_uncores = knl_msr_uncores;
}
2243
2244 static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
2245 {
2246         struct pci_dev *pdev = box->pci_dev;
2247         int box_ctl = uncore_pci_box_ctl(box);
2248
2249         pci_write_config_dword(pdev, box_ctl, 0);
2250 }
2251
2252 static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
2253                                         struct perf_event *event)
2254 {
2255         struct pci_dev *pdev = box->pci_dev;
2256         struct hw_perf_event *hwc = &event->hw;
2257
2258         if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
2259                                                         == UNCORE_FIXED_EVENT)
2260                 pci_write_config_dword(pdev, hwc->config_base,
2261                                        hwc->config | KNL_PMON_FIXED_CTL_EN);
2262         else
2263                 pci_write_config_dword(pdev, hwc->config_base,
2264                                        hwc->config | SNBEP_PMON_CTL_EN);
2265 }
2266
/* Shared PCI ops for KNL IMC and EDC PMUs (KNL-specific box/event enable) */
static struct intel_uncore_ops knl_uncore_imc_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= knl_uncore_imc_enable_box,
	.read_counter	= snbep_uncore_pci_read_counter,
	.enable_event	= knl_uncore_imc_enable_event,
	.disable_event	= snbep_uncore_pci_disable_event,
};
2275
/* KNL memory controller UClk PMU: 2 boxes (MC0/MC1), 4 + 1 fixed counters */
static struct intel_uncore_type knl_uncore_imc_uclk = {
	.name			= "imc_uclk",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2291
/* KNL memory controller DClk PMU: 6 boxes (2 MCs x 3 channels) */
static struct intel_uncore_type knl_uncore_imc_dclk = {
	.name			= "imc",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_MC0_CH0_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
	.fixed_ctl		= KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
	.box_ctl		= KNL_MC0_CH0_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2307
/* KNL embedded DRAM controller UClk PMU: 8 boxes (EDC0-EDC7) */
static struct intel_uncore_type knl_uncore_edc_uclk = {
	.name			= "edc_uclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2323
/* KNL embedded DRAM controller EClk PMU: 8 boxes (EDC0-EDC7) */
static struct intel_uncore_type knl_uncore_edc_eclk = {
	.name			= "edc_eclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_EDC0_ECLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
	.fixed_ctl		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
	.box_ctl		= KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2339
/* KNL M2PCIe: event 0x23 is restricted to counters 0-1 (mask 0x3) */
static struct event_constraint knl_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};
2344
/* KNL M2PCIe PMU: one PCI box using the common SNB-EP PCI register layout */
static struct intel_uncore_type knl_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= knl_uncore_m2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2353
/* Event-encoding fields exposed in sysfs for the KNL IRP PMU */
static struct attribute *knl_uncore_irp_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
2363
/* sysfs "format" directory for the KNL IRP PMU */
static const struct attribute_group knl_uncore_irp_format_group = {
	.name = "format",
	.attrs = knl_uncore_irp_formats_attr,
};
2368
/* KNL IRP PMU: one PCI box with only two counters */
static struct intel_uncore_type knl_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= KNL_IRP_PCI_PMON_BOX_CTL,
	.ops			= &snbep_uncore_pci_ops,
	.format_group		= &knl_uncore_irp_format_group,
};
2381
/* Indices into knl_pci_uncores[], also encoded in pci_device_id driver_data */
enum {
	KNL_PCI_UNCORE_MC_UCLK,
	KNL_PCI_UNCORE_MC_DCLK,
	KNL_PCI_UNCORE_EDC_UCLK,
	KNL_PCI_UNCORE_EDC_ECLK,
	KNL_PCI_UNCORE_M2PCIE,
	KNL_PCI_UNCORE_IRP,
};
2390
/* NULL-terminated list of all PCI-based KNL uncore PMU types */
static struct intel_uncore_type *knl_pci_uncores[] = {
	[KNL_PCI_UNCORE_MC_UCLK]	= &knl_uncore_imc_uclk,
	[KNL_PCI_UNCORE_MC_DCLK]	= &knl_uncore_imc_dclk,
	[KNL_PCI_UNCORE_EDC_UCLK]	= &knl_uncore_edc_uclk,
	[KNL_PCI_UNCORE_EDC_ECLK]	= &knl_uncore_edc_eclk,
	[KNL_PCI_UNCORE_M2PCIE]		= &knl_uncore_m2pcie,
	[KNL_PCI_UNCORE_IRP]		= &knl_uncore_irp,
	NULL,
};
2400
2401 /*
2402  * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
2403  * device type. prior to KNL, each instance of a PMU device type had a unique
2404  * device ID.
2405  *
2406  *      PCI Device ID   Uncore PMU Devices
2407  *      ----------------------------------
2408  *      0x7841          MC0 UClk, MC1 UClk
2409  *      0x7843          MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
2410  *                      MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
2411  *      0x7833          EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
2412  *                      EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
2413  *      0x7835          EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
2414  *                      EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
2415  *      0x7817          M2PCIe
2416  *      0x7814          IRP
2417 */
2418
/*
 * Since device IDs are shared (see table above), each entry pins a specific
 * PMU instance by its PCI devfn via UNCORE_PCI_DEV_FULL_DATA(dev, fn, ...).
 */
static const struct pci_device_id knl_uncore_pci_ids[] = {
	{ /* MC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
	},
	{ /* MC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
	},
	{ /* MC0 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
	},
	{ /* MC0 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
	},
	{ /* MC0 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
	},
	{ /* MC1 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
	},
	{ /* MC1 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
	},
	{ /* MC1 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
	},
	{ /* EDC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
	},
	{ /* EDC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
	},
	{ /* EDC2 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
	},
	{ /* EDC3 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
	},
	{ /* EDC4 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
	},
	{ /* EDC5 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
	},
	{ /* EDC6 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
	},
	{ /* EDC7 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
	},
	{ /* EDC0 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
	},
	{ /* EDC1 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
	},
	{ /* EDC2 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
	},
	{ /* EDC3 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
	},
	{ /* EDC4 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
	},
	{ /* EDC5 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
	},
	{ /* EDC6 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
	},
	{ /* EDC7 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
	},
	{ /* M2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
	},
	{ /* end: all zeroes */ }
};
2526
/* PCI driver stub: only the ID table is used by the uncore framework */
static struct pci_driver knl_uncore_pci_driver = {
	.name		= "knl_uncore",
	.id_table	= knl_uncore_pci_ids,
};
2531
2532 int knl_uncore_pci_init(void)
2533 {
2534         int ret;
2535
2536         /* All KNL PCI based PMON units are on the same PCI bus except IRP */
2537         ret = snb_pci2phy_map_init(0x7814); /* IRP */
2538         if (ret)
2539                 return ret;
2540         ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
2541         if (ret)
2542                 return ret;
2543         uncore_pci_uncores = knl_pci_uncores;
2544         uncore_pci_driver = &knl_uncore_pci_driver;
2545         return 0;
2546 }
2547
2548 /* end of KNL uncore support */
2549
2550 /* Haswell-EP uncore support */
/* Event-encoding fields exposed in sysfs for the Haswell-EP Ubox PMU */
static struct attribute *hswep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_filter_tid2.attr,
	&format_attr_filter_cid.attr,
	NULL,
};
2561
/* sysfs "format" directory for the Haswell-EP Ubox PMU */
static const struct attribute_group hswep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_ubox_formats_attr,
};
2566
2567 static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2568 {
2569         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2570         reg1->reg = HSWEP_U_MSR_PMON_FILTER;
2571         reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
2572         reg1->idx = 0;
2573         return 0;
2574 }
2575
/* Ubox ops: common SNB-EP MSR handling plus the shared filter register */
static struct intel_uncore_ops hswep_uncore_ubox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_ubox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};
2582
/* Haswell-EP Ubox PMU: 2 generic 44-bit counters plus a 48-bit fixed counter */
static struct intel_uncore_type hswep_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 44,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_ubox_ops,
	.format_group		= &hswep_uncore_ubox_format_group,
};
2598
/* Event-encoding and filter fields exposed in sysfs for the HSW-EP C-box PMU */
static struct attribute *hswep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid3.attr,
	&format_attr_filter_link2.attr,
	&format_attr_filter_state3.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};
2615
/* sysfs "format" directory for the Haswell-EP C-box PMU */
static const struct attribute_group hswep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_cbox_formats_attr,
};
2620
/* HSW-EP C-box counter constraints (bitmask = allowed counters per event) */
static struct event_constraint hswep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
2631
/*
 * Events requiring the C-box filter register.  The idx bit field is decoded
 * by hswep_cbox_filter_mask(): 0x1 = TID, 0x2 = LINK, 0x4 = STATE,
 * 0x8 = NID, 0x10 = OPC/NC/C6/ISOC group.
 */
static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	EVENT_EXTRA_END
};
2673
2674 static u64 hswep_cbox_filter_mask(int fields)
2675 {
2676         u64 mask = 0;
2677         if (fields & 0x1)
2678                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2679         if (fields & 0x2)
2680                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2681         if (fields & 0x4)
2682                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2683         if (fields & 0x8)
2684                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2685         if (fields & 0x10) {
2686                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2687                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2688                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2689                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
2690         }
2691         return mask;
2692 }
2693
/* Constraint lookup: shared C-box logic parameterized with the HSW-EP mask */
static struct event_constraint *
hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
}
2699
2700 static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2701 {
2702         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2703         struct extra_reg *er;
2704         int idx = 0;
2705
2706         for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
2707                 if (er->event != (event->hw.config & er->config_mask))
2708                         continue;
2709                 idx |= er->idx;
2710         }
2711
2712         if (idx) {
2713                 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2714                             HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
2715                 reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
2716                 reg1->idx = idx;
2717         }
2718         return 0;
2719 }
2720
2721 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2722                                   struct perf_event *event)
2723 {
2724         struct hw_perf_event *hwc = &event->hw;
2725         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2726
2727         if (reg1->idx != EXTRA_REG_NONE) {
2728                 u64 filter = uncore_shared_reg_config(box, 0);
2729                 wrmsrl(reg1->reg, filter & 0xffffffff);
2730                 wrmsrl(reg1->reg + 1, filter >> 32);
2731         }
2732
2733         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
2734 }
2735
/* HSW-EP C-box ops: SNB-EP MSR box handling with HSW-EP filter management */
static struct intel_uncore_ops hswep_uncore_cbox_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= hswep_cbox_hw_config,
	.get_constraint		= hswep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
2747
/*
 * Haswell-EP C-box PMU: up to 18 boxes (clamped to the core count in
 * hswep_uncore_cpu_init()), 4 counters each, one shared filter register.
 */
static struct intel_uncore_type hswep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 18,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= hswep_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
2763
2764 /*
2765  * Write SBOX Initialization register bit by bit to avoid spurious #GPs
2766  */
2767 static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
2768 {
2769         unsigned msr = uncore_msr_box_ctl(box);
2770
2771         if (msr) {
2772                 u64 init = SNBEP_PMON_BOX_CTL_INT;
2773                 u64 flags = 0;
2774                 int i;
2775
2776                 for_each_set_bit(i, (unsigned long *)&init, 64) {
2777                         flags |= (1ULL << i);
2778                         wrmsrl(msr, flags);
2779                 }
2780         }
2781 }
2782
/* SBOX ops: common MSR handling with the #GP-safe init_box override */
static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.init_box		= hswep_uncore_sbox_msr_init_box
};
2787
/* Event-encoding fields exposed in sysfs for the Haswell-EP SBOX PMU */
static struct attribute *hswep_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
2797
/* sysfs "format" directory for the Haswell-EP SBOX PMU */
static const struct attribute_group hswep_uncore_sbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_sbox_formats_attr,
};
2802
/*
 * Haswell-EP SBOX PMU: 4 boxes by default (reduced to 2 at runtime on
 * 6-8 core parts, see hswep_uncore_cpu_init()), 44-bit counters.
 */
static struct intel_uncore_type hswep_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 44,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};
2816
/*
 * PCU occupancy events 0xb-0xe each own one band of the shared filter
 * register; map the event to its band and latch the config1 bits.
 */
static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		/*
		 * NOTE(review): the mask is shifted by idx (0-3), not by
		 * idx * 8, so the selected config1 windows overlap —
		 * confirm against the HSW-EP uncore PMON filter layout.
		 */
		reg1->config = event->attr.config1 & (0xff << reg1->idx);
	}
	return 0;
}
2830
/* PCU ops: common SNB-EP MSR handling plus occupancy-filter management */
static struct intel_uncore_ops hswep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
2837
/* Haswell-EP power control unit PMU: one box, 4 counters, one shared filter */
static struct intel_uncore_type hswep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};
2851
/* NULL-terminated list of all MSR-based Haswell-EP uncore PMU types */
static struct intel_uncore_type *hswep_msr_uncores[] = {
	&hswep_uncore_ubox,
	&hswep_uncore_cbox,
	&hswep_uncore_sbox,
	&hswep_uncore_pcu,
	NULL,
};
2859
/*
 * Register the Haswell-EP MSR-based uncore PMUs, first adjusting box
 * counts to what this particular part actually implements.
 */
void hswep_uncore_cpu_init(void)
{
	int pkg = boot_cpu_data.logical_proc_id;

	/* There is one C-box per core; clamp to the real core count. */
	if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;

	/* Detect 6-8 core systems with only two SBOXes */
	if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
		u32 capid4;

		/* Read capability register CAPID4 (config offset 0x94). */
		pci_read_config_dword(uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3],
				      0x94, &capid4);
		/* Bits 7:6 == 0 identify the two-SBOX parts. */
		if (((capid4 >> 6) & 0x3) == 0)
			hswep_uncore_sbox.num_boxes = 2;
	}

	uncore_msr_uncores = hswep_msr_uncores;
}
2879
/* Haswell-EP Home Agent ("ha") PCI PMON: two boxes, four 48-bit counters. */
static struct intel_uncore_type hswep_uncore_ha = {
        .name           = "ha",
        .num_counters   = 4,
        .num_boxes      = 2,
        .perf_ctr_bits  = 48,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2887
/*
 * Pre-defined IMC events: clockticks plus CAS read/write counts.
 * The scale 6.103515625e-5 equals 64/2^20, converting event counts
 * to MiB (presumably one 64-byte transfer per CAS event — matches
 * the "MiB" unit below).
 */
static struct uncore_event_desc hswep_uncore_imc_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
        INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
        INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
        INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
        INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
        { /* end: all zeroes */ },
};
2898
/*
 * Haswell-EP IMC ("imc") PCI PMON: eight boxes (memory channels), four
 * 48-bit general counters plus one 48-bit fixed counter per box.
 */
static struct intel_uncore_type hswep_uncore_imc = {
        .name           = "imc",
        .num_counters   = 4,
        .num_boxes      = 8,
        .perf_ctr_bits  = 48,
        .fixed_ctr_bits = 48,
        .fixed_ctr      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
        .fixed_ctl      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
        .event_descs    = hswep_uncore_imc_events,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2910
2911 static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
2912
2913 static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
2914 {
2915         struct pci_dev *pdev = box->pci_dev;
2916         struct hw_perf_event *hwc = &event->hw;
2917         u64 count = 0;
2918
2919         pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
2920         pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
2921
2922         return count;
2923 }
2924
/*
 * IRP PMON ops: SNB-EP PCI box control, IVB-EP style event enable/disable,
 * and a HSX-specific read_counter that assembles the 64-bit value from two
 * 32-bit PCI config reads.
 */
static struct intel_uncore_ops hswep_uncore_irp_ops = {
        .init_box       = snbep_uncore_pci_init_box,
        .disable_box    = snbep_uncore_pci_disable_box,
        .enable_box     = snbep_uncore_pci_enable_box,
        .disable_event  = ivbep_uncore_irp_disable_event,
        .enable_event   = ivbep_uncore_irp_enable_event,
        .read_counter   = hswep_uncore_irp_read_counter,
};
2933
/*
 * Haswell-EP IRP ("irp") PCI PMON: one box, four 48-bit counters.
 * No .perf_ctr/.event_ctl here — counter access goes through the
 * custom accessors in hswep_uncore_irp_ops.
 */
static struct intel_uncore_type hswep_uncore_irp = {
        .name                   = "irp",
        .num_counters           = 4,
        .num_boxes              = 1,
        .perf_ctr_bits          = 48,
        .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
        .ops                    = &hswep_uncore_irp_ops,
        .format_group           = &snbep_uncore_format_group,
};
2944
/*
 * Haswell-EP QPI ("qpi") PCI PMON: three link boxes, four 48-bit
 * counters, one shared (match/mask filter) register; reuses the
 * SNB-EP QPI ops and format group.
 */
static struct intel_uncore_type hswep_uncore_qpi = {
        .name                   = "qpi",
        .num_counters           = 4,
        .num_boxes              = 3,
        .perf_ctr_bits          = 48,
        .perf_ctr               = SNBEP_PCI_PMON_CTR0,
        .event_ctl              = SNBEP_PCI_PMON_CTL0,
        .event_mask             = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
        .num_shared_regs        = 1,
        .ops                    = &snbep_uncore_qpi_ops,
        .format_group           = &snbep_uncore_qpi_format_group,
};
2958
/*
 * R2PCIe event constraints: per-event counter masks (0x1 = counter 0
 * only, 0x3 = counters 0-1).
 */
static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
        EVENT_CONSTRAINT_END
};
2980
/* Haswell-EP R2PCIe ("r2pcie") PCI PMON: one box, four 48-bit counters. */
static struct intel_uncore_type hswep_uncore_r2pcie = {
        .name           = "r2pcie",
        .num_counters   = 4,
        .num_boxes      = 1,
        .perf_ctr_bits  = 48,
        .constraints    = hswep_uncore_r2pcie_constraints,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2989
/*
 * R3QPI event constraints: per-event counter masks (0x1 = counter 0,
 * 0x3 = counters 0-1, 0x7 = counters 0-2).
 */
static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
        EVENT_CONSTRAINT_END
};
3026
/* Haswell-EP R3QPI ("r3qpi") PCI PMON: three boxes, three 44-bit counters. */
static struct intel_uncore_type hswep_uncore_r3qpi = {
        .name           = "r3qpi",
        .num_counters   = 3,
        .num_boxes      = 3,
        .perf_ctr_bits  = 44,
        .constraints    = hswep_uncore_r3qpi_constraints,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3035
/* Index space for hswep_pci_uncores[] and the PCI id table driver_data. */
enum {
        HSWEP_PCI_UNCORE_HA,
        HSWEP_PCI_UNCORE_IMC,
        HSWEP_PCI_UNCORE_IRP,
        HSWEP_PCI_UNCORE_QPI,
        HSWEP_PCI_UNCORE_R2PCIE,
        HSWEP_PCI_UNCORE_R3QPI,
};

/* NULL-terminated list of the PCI-based uncore units on Haswell-EP. */
static struct intel_uncore_type *hswep_pci_uncores[] = {
        [HSWEP_PCI_UNCORE_HA]   = &hswep_uncore_ha,
        [HSWEP_PCI_UNCORE_IMC]  = &hswep_uncore_imc,
        [HSWEP_PCI_UNCORE_IRP]  = &hswep_uncore_irp,
        [HSWEP_PCI_UNCORE_QPI]  = &hswep_uncore_qpi,
        [HSWEP_PCI_UNCORE_R2PCIE]       = &hswep_uncore_r2pcie,
        [HSWEP_PCI_UNCORE_R3QPI]        = &hswep_uncore_r3qpi,
        NULL,
};
3054
/*
 * PCI device IDs of the Haswell-EP uncore PMON units.  driver_data
 * encodes (unit type index, box number); UNCORE_EXTRA_PCI_DEV entries
 * are not PMON boxes themselves but are kept for filter/capability
 * register access.
 */
static const struct pci_device_id hswep_uncore_pci_ids[] = {
        { /* Home Agent 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
                .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
        },
        { /* Home Agent 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
                .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
        },
        { /* MC0 Channel 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
                .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
        },
        { /* MC0 Channel 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
                .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
        },
        { /* MC0 Channel 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
                .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
        },
        { /* MC0 Channel 3 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
                .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
        },
        { /* MC1 Channel 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
                .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
        },
        { /* MC1 Channel 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
                .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
        },
        { /* MC1 Channel 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
                .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
        },
        { /* MC1 Channel 3 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
                .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
        },
        { /* IRP */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
                .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
        },
        { /* QPI0 Port 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
                .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
        },
        { /* QPI0 Port 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
                .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
        },
        { /* QPI1 Port 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
                .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
        },
        { /* R2PCIe */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
                .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
        },
        { /* R3QPI0 Link 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
                .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
        },
        { /* R3QPI0 Link 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
                .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
        },
        { /* R3QPI1 Link 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
                .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
        },
        { /* QPI Port 0 filter  */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
                .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
                                                   SNBEP_PCI_QPI_PORT0_FILTER),
        },
        { /* QPI Port 1 filter  */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
                .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
                                                   SNBEP_PCI_QPI_PORT1_FILTER),
        },
        { /* PCU.3 (for Capability registers) */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
                .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
                                                   HSWEP_PCI_PCU_3),
        },
        { /* end: all zeroes */ }
};
3145
/* id_table-only driver: matched devices are claimed by the uncore core. */
static struct pci_driver hswep_uncore_pci_driver = {
        .name           = "hswep_uncore",
        .id_table       = hswep_uncore_pci_ids,
};
3150
3151 int hswep_uncore_pci_init(void)
3152 {
3153         int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3154         if (ret)
3155                 return ret;
3156         uncore_pci_uncores = hswep_pci_uncores;
3157         uncore_pci_driver = &hswep_uncore_pci_driver;
3158         return 0;
3159 }
3160 /* end of Haswell-EP uncore support */
3161
3162 /* BDX uncore support */
3163
/*
 * Broadwell-EP UBOX ("ubox") MSR PMON: one box, two 48-bit counters
 * plus a fixed UCLK counter; reuses IVB-EP ops and format group.
 */
static struct intel_uncore_type bdx_uncore_ubox = {
        .name                   = "ubox",
        .num_counters           = 2,
        .num_boxes              = 1,
        .perf_ctr_bits          = 48,
        .fixed_ctr_bits         = 48,
        .perf_ctr               = HSWEP_U_MSR_PMON_CTR0,
        .event_ctl              = HSWEP_U_MSR_PMON_CTL0,
        .event_mask             = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
        .fixed_ctr              = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
        .fixed_ctl              = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
        .num_shared_regs        = 1,
        .ops                    = &ivbep_uncore_msr_ops,
        .format_group           = &ivbep_uncore_ubox_format_group,
};
3179
/* BDX CBOX event constraints (0x1 = counter 0 only, 0x3 = counters 0-1). */
static struct event_constraint bdx_uncore_cbox_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
        EVENT_CONSTRAINT_END
};
3187
/*
 * Broadwell-EP CBOX ("cbox") MSR PMON: up to 24 boxes (clamped to the
 * core count in bdx_uncore_cpu_init), four 48-bit counters, one shared
 * filter register; reuses the HSX-EP cbox ops and format group.
 */
static struct intel_uncore_type bdx_uncore_cbox = {
        .name                   = "cbox",
        .num_counters           = 4,
        .num_boxes              = 24,
        .perf_ctr_bits          = 48,
        .event_ctl              = HSWEP_C0_MSR_PMON_CTL0,
        .perf_ctr               = HSWEP_C0_MSR_PMON_CTR0,
        .event_mask             = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = HSWEP_C0_MSR_PMON_BOX_CTL,
        .msr_offset             = HSWEP_CBO_MSR_OFFSET,
        .num_shared_regs        = 1,
        .constraints            = bdx_uncore_cbox_constraints,
        .ops                    = &hswep_uncore_cbox_ops,
        .format_group           = &hswep_uncore_cbox_format_group,
};
3203
/*
 * Broadwell-EP SBOX ("sbox") MSR PMON: four boxes, four 48-bit counters.
 * The SBOX slot in bdx_msr_uncores may be NULLed at init time on parts
 * without SBOXes (see bdx_uncore_cpu_init).
 */
static struct intel_uncore_type bdx_uncore_sbox = {
        .name                   = "sbox",
        .num_counters           = 4,
        .num_boxes              = 4,
        .perf_ctr_bits          = 48,
        .event_ctl              = HSWEP_S0_MSR_PMON_CTL0,
        .perf_ctr               = HSWEP_S0_MSR_PMON_CTR0,
        .event_mask             = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = HSWEP_S0_MSR_PMON_BOX_CTL,
        .msr_offset             = HSWEP_SBOX_MSR_OFFSET,
        .ops                    = &hswep_uncore_sbox_msr_ops,
        .format_group           = &hswep_uncore_sbox_format_group,
};
3217
/* Index of the SBOX entry in bdx_msr_uncores[] below. */
#define BDX_MSR_UNCORE_SBOX     3

/* NULL-terminated list of the MSR-based uncore units on Broadwell-EP. */
static struct intel_uncore_type *bdx_msr_uncores[] = {
        &bdx_uncore_ubox,
        &bdx_uncore_cbox,
        &hswep_uncore_pcu,
        &bdx_uncore_sbox,
        NULL,
};
3227
/* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
static struct event_constraint bdx_uncore_pcu_constraints[] = {
        /* Events with bit 7 (0x80) set are restricted to counters 1-3 (mask 0xe). */
        EVENT_CONSTRAINT(0x80, 0xe, 0x80),
        EVENT_CONSTRAINT_END
};
3233
3234 void bdx_uncore_cpu_init(void)
3235 {
3236         int pkg = topology_phys_to_logical_pkg(boot_cpu_data.phys_proc_id);
3237
3238         if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
3239                 bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
3240         uncore_msr_uncores = bdx_msr_uncores;
3241
3242         /* BDX-DE doesn't have SBOX */
3243         if (boot_cpu_data.x86_model == 86) {
3244                 uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
3245         /* Detect systems with no SBOXes */
3246         } else if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
3247                 struct pci_dev *pdev;
3248                 u32 capid4;
3249
3250                 pdev = uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3];
3251                 pci_read_config_dword(pdev, 0x94, &capid4);
3252                 if (((capid4 >> 6) & 0x3) == 0)
3253                         bdx_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
3254         }
3255         hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
3256 }
3257
/* Broadwell-EP Home Agent ("ha") PCI PMON: two boxes, four 48-bit counters. */
static struct intel_uncore_type bdx_uncore_ha = {
        .name           = "ha",
        .num_counters   = 4,
        .num_boxes      = 2,
        .perf_ctr_bits  = 48,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3265
/*
 * Broadwell-EP IMC ("imc") PCI PMON: eight boxes (channels), four
 * 48-bit counters plus a fixed counter; shares the HSX-EP event list.
 */
static struct intel_uncore_type bdx_uncore_imc = {
        .name           = "imc",
        .num_counters   = 4,
        .num_boxes      = 8,
        .perf_ctr_bits  = 48,
        .fixed_ctr_bits = 48,
        .fixed_ctr      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
        .fixed_ctl      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
        .event_descs    = hswep_uncore_imc_events,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3277
/*
 * Broadwell-EP IRP ("irp") PCI PMON: one box, four 48-bit counters;
 * counter access goes through the HSX-EP IRP custom accessors.
 */
static struct intel_uncore_type bdx_uncore_irp = {
        .name                   = "irp",
        .num_counters           = 4,
        .num_boxes              = 1,
        .perf_ctr_bits          = 48,
        .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
        .ops                    = &hswep_uncore_irp_ops,
        .format_group           = &snbep_uncore_format_group,
};
3288
/*
 * Broadwell-EP QPI ("qpi") PCI PMON: three link boxes, four 48-bit
 * counters, one shared register; reuses the SNB-EP QPI ops/format.
 */
static struct intel_uncore_type bdx_uncore_qpi = {
        .name                   = "qpi",
        .num_counters           = 4,
        .num_boxes              = 3,
        .perf_ctr_bits          = 48,
        .perf_ctr               = SNBEP_PCI_PMON_CTR0,
        .event_ctl              = SNBEP_PCI_PMON_CTL0,
        .event_mask             = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
        .num_shared_regs        = 1,
        .ops                    = &snbep_uncore_qpi_ops,
        .format_group           = &snbep_uncore_qpi_format_group,
};
3302
/* BDX R2PCIe event constraints (0x1 = counter 0 only, 0x3 = counters 0-1). */
static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
        EVENT_CONSTRAINT_END
};
3315
/* Broadwell-EP R2PCIe ("r2pcie") PCI PMON: one box, four 48-bit counters. */
static struct intel_uncore_type bdx_uncore_r2pcie = {
        .name           = "r2pcie",
        .num_counters   = 4,
        .num_boxes      = 1,
        .perf_ctr_bits  = 48,
        .constraints    = bdx_uncore_r2pcie_constraints,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3324
/*
 * BDX R3QPI event constraints: per-event counter masks (0x1 = counter 0,
 * 0x3 = counters 0-1, 0x7 = counters 0-2).
 */
static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
        EVENT_CONSTRAINT_END
};
3358
/* Broadwell-EP R3QPI ("r3qpi") PCI PMON: three boxes, three 48-bit counters. */
static struct intel_uncore_type bdx_uncore_r3qpi = {
        .name           = "r3qpi",
        .num_counters   = 3,
        .num_boxes      = 3,
        .perf_ctr_bits  = 48,
        .constraints    = bdx_uncore_r3qpi_constraints,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3367
/* Index space for bdx_pci_uncores[] and the PCI id table driver_data. */
enum {
        BDX_PCI_UNCORE_HA,
        BDX_PCI_UNCORE_IMC,
        BDX_PCI_UNCORE_IRP,
        BDX_PCI_UNCORE_QPI,
        BDX_PCI_UNCORE_R2PCIE,
        BDX_PCI_UNCORE_R3QPI,
};

/* NULL-terminated list of the PCI-based uncore units on Broadwell-EP. */
static struct intel_uncore_type *bdx_pci_uncores[] = {
        [BDX_PCI_UNCORE_HA]     = &bdx_uncore_ha,
        [BDX_PCI_UNCORE_IMC]    = &bdx_uncore_imc,
        [BDX_PCI_UNCORE_IRP]    = &bdx_uncore_irp,
        [BDX_PCI_UNCORE_QPI]    = &bdx_uncore_qpi,
        [BDX_PCI_UNCORE_R2PCIE] = &bdx_uncore_r2pcie,
        [BDX_PCI_UNCORE_R3QPI]  = &bdx_uncore_r3qpi,
        NULL,
};
3386
/*
 * PCI device IDs of the Broadwell-EP uncore PMON units.  driver_data
 * encodes (unit type index, box number); UNCORE_EXTRA_PCI_DEV entries
 * are kept for filter/capability register access only.
 */
static const struct pci_device_id bdx_uncore_pci_ids[] = {
        { /* Home Agent 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
        },
        { /* Home Agent 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
        },
        { /* MC0 Channel 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
        },
        { /* MC0 Channel 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
        },
        { /* MC0 Channel 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
        },
        { /* MC0 Channel 3 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
        },
        { /* MC1 Channel 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
        },
        { /* MC1 Channel 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
        },
        { /* MC1 Channel 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
        },
        { /* MC1 Channel 3 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
        },
        { /* IRP */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
        },
        { /* QPI0 Port 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
        },
        { /* QPI0 Port 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
        },
        { /* QPI1 Port 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
        },
        { /* R2PCIe */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
        },
        { /* R3QPI0 Link 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
        },
        { /* R3QPI0 Link 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
        },
        { /* R3QPI1 Link 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
        },
        { /* QPI Port 0 filter  */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
                .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
                                                   SNBEP_PCI_QPI_PORT0_FILTER),
        },
        { /* QPI Port 1 filter  */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
                .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
                                                   SNBEP_PCI_QPI_PORT1_FILTER),
        },
        { /* QPI Port 2 filter  */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
                .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
                                                   BDX_PCI_QPI_PORT2_FILTER),
        },
        { /* PCU.3 (for Capability registers) */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0),
                .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
                                                   HSWEP_PCI_PCU_3),
        },
        { /* end: all zeroes */ }
};
3482
/* id_table-only driver: matched devices are claimed by the uncore core. */
static struct pci_driver bdx_uncore_pci_driver = {
        .name           = "bdx_uncore",
        .id_table       = bdx_uncore_pci_ids,
};
3487
3488 int bdx_uncore_pci_init(void)
3489 {
3490         int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3491
3492         if (ret)
3493                 return ret;
3494         uncore_pci_uncores = bdx_pci_uncores;
3495         uncore_pci_driver = &bdx_uncore_pci_driver;
3496         return 0;
3497 }
3498
3499 /* end of BDX uncore support */
3500
3501 /* SKX uncore support */
3502
/*
 * Skylake-X UBOX ("ubox") MSR PMON: one box, two 48-bit counters plus
 * a fixed UCLK counter; reuses IVB-EP ops and format group.
 */
static struct intel_uncore_type skx_uncore_ubox = {
        .name                   = "ubox",
        .num_counters           = 2,
        .num_boxes              = 1,
        .perf_ctr_bits          = 48,
        .fixed_ctr_bits         = 48,
        .perf_ctr               = HSWEP_U_MSR_PMON_CTR0,
        .event_ctl              = HSWEP_U_MSR_PMON_CTL0,
        .event_mask             = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
        .fixed_ctr              = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
        .fixed_ctl              = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
        .ops                    = &ivbep_uncore_msr_ops,
        .format_group           = &ivbep_uncore_ubox_format_group,
};
3517
/*
 * sysfs format attributes for the SKX CHA box: base event fields plus
 * the CHA filter fields handled by skx_cha_filter_mask()/skx_cha_hw_config().
 */
static struct attribute *skx_uncore_cha_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_tid_en.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        &format_attr_filter_tid4.attr,
        &format_attr_filter_state5.attr,
        &format_attr_filter_rem.attr,
        &format_attr_filter_loc.attr,
        &format_attr_filter_nm.attr,
        &format_attr_filter_all_op.attr,
        &format_attr_filter_not_nm.attr,
        &format_attr_filter_opc_0.attr,
        &format_attr_filter_opc_1.attr,
        &format_attr_filter_nc.attr,
        &format_attr_filter_isoc.attr,
        NULL,
};
3538
/* Exposes the CHA format attributes under <pmu>/format in sysfs. */
static const struct attribute_group skx_uncore_chabox_format_group = {
        .name = "format",
        .attrs = skx_uncore_cha_formats_attr,
};
3543
/* SKX CHA event constraints: events 0x11 and 0x36 only on counter 0. */
static struct event_constraint skx_uncore_chabox_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
        EVENT_CONSTRAINT_END
};
3549
/*
 * Events that need the CHA filter register.  The idx column carries
 * the filter-field flags consumed by skx_cha_filter_mask() (0x4 =
 * state, 0x8 = opcode/remote/local group, 0x3 = tid + link).
 */
static struct extra_reg skx_uncore_cha_extra_regs[] = {
        SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
        EVENT_EXTRA_END
};
3562
3563 static u64 skx_cha_filter_mask(int fields)
3564 {
3565         u64 mask = 0;
3566
3567         if (fields & 0x1)
3568                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3569         if (fields & 0x2)
3570                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
3571         if (fields & 0x4)
3572                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
3573         if (fields & 0x8) {
3574                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
3575                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
3576                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
3577                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
3578                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
3579                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
3580                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
3581                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
3582                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
3583         }
3584         return mask;
3585 }
3586
/*
 * Constraint lookup for SKX CHA: delegates to the common SNB-EP cbox
 * logic with the SKX-specific filter mask function.
 */
static struct event_constraint *
skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
}
3592
3593 static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
3594 {
3595         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
3596         struct extra_reg *er;
3597         int idx = 0;
3598
3599         for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
3600                 if (er->event != (event->hw.config & er->config_mask))
3601                         continue;
3602                 idx |= er->idx;
3603         }
3604
3605         if (idx) {
3606                 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
3607                             HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
3608                 reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
3609                 reg1->idx = idx;
3610         }
3611         return 0;
3612 }
3613
/* MSR-based PMON ops for the SKX CHA (chabox) units. */
static struct intel_uncore_ops skx_uncore_chabox_ops = {
	/* There is no frz_en for chabox ctl */
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= skx_cha_hw_config,
	.get_constraint		= skx_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/*
 * SKX CHA PMON type.  num_boxes is left unset here; it is filled in by
 * skx_uncore_cpu_init() from the CAPID6 CHA enable bits.
 */
static struct intel_uncore_type skx_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &skx_uncore_chabox_ops,
	.format_group		= &skx_uncore_chabox_format_group,
};
3641
/* sysfs "format" attributes describing the SKX IIO raw event encoding. */
static struct attribute *skx_uncore_iio_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh9.attr,
	&format_attr_ch_mask.attr,
	&format_attr_fc_mask.attr,
	NULL,
};

static const struct attribute_group skx_uncore_iio_format_group = {
	.name = "format",
	.attrs = skx_uncore_iio_formats_attr,
};

/* IIO events that may only run on a subset of the four counters. */
static struct event_constraint skx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
	EVENT_CONSTRAINT_END
};
3667
/* Enable an IIO event by setting the EN bit in its event control MSR. */
static void skx_iio_enable_event(struct intel_uncore_box *box,
				 struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
3675
static struct intel_uncore_ops skx_uncore_iio_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= skx_iio_enable_event,
	.read_counter		= uncore_msr_read_counter,
};

/*
 * Extract this IIO stack's root-bus number for @die from the cached
 * topology word: each stack's bus number sits BUS_NUM_STRIDE bits apart;
 * the u8 return keeps only the low byte after the shift.
 */
static inline u8 skx_iio_stack(struct intel_uncore_pmu *pmu, int die)
{
	return pmu->type->topology[die] >> (pmu->pmu_idx * BUS_NUM_STRIDE);
}
3689
/*
 * sysfs is_visible callback: hide the "dieX" mapping attribute when this
 * stack reports root bus 0x00 on any PMU other than the first, since bus
 * 0 is only a legitimate stack bus for pmu_idx 0.
 */
static umode_t
skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
{
	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));

	/* Root bus 0x00 is valid only for die 0 AND pmu_idx = 0. */
	return (!skx_iio_stack(pmu, die) && pmu->pmu_idx) ? 0 : attr->mode;
}
3698
/*
 * sysfs show handler for a "dieX" mapping attribute: prints the PCI
 * segment and root bus of this IIO stack as "SSSS:BB".
 */
static ssize_t skx_iio_mapping_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct pci_bus *bus = pci_find_next_bus(NULL);
	struct intel_uncore_pmu *uncore_pmu = dev_to_uncore_pmu(dev);
	struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
	long die = (long)ea->var;	/* die index stashed by skx_iio_set_mapping() */

	/*
	 * Current implementation is for single segment configuration hence it's
	 * safe to take the segment value from the first available root bus.
	 */
	return sprintf(buf, "%04x:%02x\n", pci_domain_nr(bus),
					   skx_iio_stack(uncore_pmu, die));
}
3714
3715 static int skx_msr_cpu_bus_read(int cpu, u64 *topology)
3716 {
3717         u64 msr_value;
3718
3719         if (rdmsrl_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &msr_value) ||
3720                         !(msr_value & SKX_MSR_CPU_BUS_VALID_BIT))
3721                 return -ENXIO;
3722
3723         *topology = msr_value;
3724
3725         return 0;
3726 }
3727
3728 static int die_to_cpu(int die)
3729 {
3730         int res = 0, cpu, current_die;
3731         /*
3732          * Using cpus_read_lock() to ensure cpu is not going down between
3733          * looking at cpu_online_mask.
3734          */
3735         cpus_read_lock();
3736         for_each_online_cpu(cpu) {
3737                 current_die = topology_logical_die_id(cpu);
3738                 if (current_die == die) {
3739                         res = cpu;
3740                         break;
3741                 }
3742         }
3743         cpus_read_unlock();
3744         return res;
3745 }
3746
/*
 * Cache the IIO bus-number topology: one SKX_MSR_CPU_BUS_NUMBER value per
 * die, each read on an online CPU of that die.  Refuses multi-segment PCI
 * topologies (other than VMD domains) with -EPERM, since the mapping
 * attributes assume a single segment.
 */
static int skx_iio_get_topology(struct intel_uncore_type *type)
{
	int i, ret;
	struct pci_bus *bus = NULL;

	/*
	 * Verified single-segment environments only; disabled for multiple
	 * segment topologies for now except VMD domains.
	 * VMD domains start at 0x10000 to not clash with ACPI _SEG domains.
	 */
	while ((bus = pci_find_next_bus(bus))
		&& (!pci_domain_nr(bus) || pci_domain_nr(bus) > 0xffff))
		;
	if (bus)
		return -EPERM;

	type->topology = kcalloc(uncore_max_dies(), sizeof(u64), GFP_KERNEL);
	if (!type->topology)
		return -ENOMEM;

	for (i = 0; i < uncore_max_dies(); i++) {
		ret = skx_msr_cpu_bus_read(die_to_cpu(i), &type->topology[i]);
		if (ret) {
			/* Drop the partially filled array on any failure. */
			kfree(type->topology);
			type->topology = NULL;
			return ret;
		}
	}

	return 0;
}
3778
/* .attrs is populated at runtime by skx_iio_set_mapping(). */
static struct attribute_group skx_iio_mapping_group = {
	.is_visible	= skx_iio_mapping_visible,
};

static const struct attribute_group *skx_iio_attr_update[] = {
	&skx_iio_mapping_group,
	NULL,
};
3787
3788 static int skx_iio_set_mapping(struct intel_uncore_type *type)
3789 {
3790         char buf[64];
3791         int ret;
3792         long die = -1;
3793         struct attribute **attrs = NULL;
3794         struct dev_ext_attribute *eas = NULL;
3795
3796         ret = skx_iio_get_topology(type);
3797         if (ret)
3798                 goto clear_attr_update;
3799
3800         ret = -ENOMEM;
3801
3802         /* One more for NULL. */
3803         attrs = kcalloc((uncore_max_dies() + 1), sizeof(*attrs), GFP_KERNEL);
3804         if (!attrs)
3805                 goto err;
3806
3807         eas = kcalloc(uncore_max_dies(), sizeof(*eas), GFP_KERNEL);
3808         if (!eas)
3809                 goto err;
3810
3811         for (die = 0; die < uncore_max_dies(); die++) {
3812                 sprintf(buf, "die%ld", die);
3813                 sysfs_attr_init(&eas[die].attr.attr);
3814                 eas[die].attr.attr.name = kstrdup(buf, GFP_KERNEL);
3815                 if (!eas[die].attr.attr.name)
3816                         goto err;
3817                 eas[die].attr.attr.mode = 0444;
3818                 eas[die].attr.show = skx_iio_mapping_show;
3819                 eas[die].attr.store = NULL;
3820                 eas[die].var = (void *)die;
3821                 attrs[die] = &eas[die].attr.attr;
3822         }
3823         skx_iio_mapping_group.attrs = attrs;
3824
3825         return 0;
3826 err:
3827         for (; die >= 0; die--)
3828                 kfree(eas[die].attr.attr.name);
3829         kfree(eas);
3830         kfree(attrs);
3831         kfree(type->topology);
3832 clear_attr_update:
3833         type->attr_update = NULL;
3834         return ret;
3835 }
3836
3837 static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
3838 {
3839         struct attribute **attr = skx_iio_mapping_group.attrs;
3840
3841         if (!attr)
3842                 return;
3843
3844         for (; *attr; attr++)
3845                 kfree((*attr)->name);
3846         kfree(attr_to_ext_attr(*skx_iio_mapping_group.attrs));
3847         kfree(skx_iio_mapping_group.attrs);
3848         skx_iio_mapping_group.attrs = NULL;
3849         kfree(type->topology);
3850 }
3851
/* SKX IIO (traffic controller) PMON type: one box per IIO stack. */
static struct intel_uncore_type skx_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SKX_IIO0_MSR_PMON_CTL0,
	.perf_ctr		= SKX_IIO0_MSR_PMON_CTR0,
	.event_mask		= SKX_IIO_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= SKX_IIO0_MSR_PMON_BOX_CTL,
	.msr_offset		= SKX_IIO_MSR_OFFSET,
	.constraints		= skx_uncore_iio_constraints,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &skx_uncore_iio_format_group,
	.attr_update		= skx_iio_attr_update,
	.set_mapping		= skx_iio_set_mapping,
	.cleanup_mapping	= skx_iio_cleanup_mapping,
};

/* Indices into skx_iio_freerunning[] for each free-running counter type. */
enum perf_uncore_iio_freerunning_type_id {
	SKX_IIO_MSR_IOCLK			= 0,
	SKX_IIO_MSR_BW				= 1,
	SKX_IIO_MSR_UTIL			= 2,

	SKX_IIO_FREERUNNING_TYPE_MAX,
};
3878
3879
/*
 * Free-running counter layout per type.
 * NOTE(review): the initializers appear to be { base MSR, box offset,
 * counter offset, number of counters, counter width } — confirm against
 * struct freerunning_counters in uncore.h.
 */
static struct freerunning_counters skx_iio_freerunning[] = {
	[SKX_IIO_MSR_IOCLK]	= { 0xa45, 0x1, 0x20, 1, 36 },
	[SKX_IIO_MSR_BW]	= { 0xb00, 0x1, 0x10, 8, 36 },
	[SKX_IIO_MSR_UTIL]	= { 0xb08, 0x1, 0x10, 8, 36 },
};

/* Named event aliases (with scale/unit) for the free-running counters. */
static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
	/* Free-Running IO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,	"MiB"),
	/* Free-running IIO UTILIZATION Counters */
	INTEL_UNCORE_EVENT_DESC(util_in_port0,		"event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(util_out_port0,		"event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(util_in_port1,		"event=0xff,umask=0x32"),
	INTEL_UNCORE_EVENT_DESC(util_out_port1,		"event=0xff,umask=0x33"),
	INTEL_UNCORE_EVENT_DESC(util_in_port2,		"event=0xff,umask=0x34"),
	INTEL_UNCORE_EVENT_DESC(util_out_port2,		"event=0xff,umask=0x35"),
	INTEL_UNCORE_EVENT_DESC(util_in_port3,		"event=0xff,umask=0x36"),
	INTEL_UNCORE_EVENT_DESC(util_out_port3,		"event=0xff,umask=0x37"),
	{ /* end: all zeroes */ },
};
3925
/* Free-running counters cannot be started/stopped; only read and config. */
static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= uncore_freerunning_hw_config,
};

static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL,
};

static const struct attribute_group skx_uncore_iio_freerunning_format_group = {
	.name = "format",
	.attrs = skx_uncore_iio_freerunning_formats_attr,
};

/* Pseudo uncore type wrapping the IIO free-running counters. */
static struct intel_uncore_type skx_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 17,
	.num_boxes		= 6,
	.num_freerunning_types	= SKX_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= skx_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= skx_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
3952
/* Generic SKX format attributes shared by IRP, IMC, M2M, M2PCIe, M3UPI. */
static struct attribute *skx_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group skx_uncore_format_group = {
	.name = "format",
	.attrs = skx_uncore_formats_attr,
};

/* SKX IRP (IIO ring port) PMON type; shares the IIO MSR ops. */
static struct intel_uncore_type skx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SKX_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= SKX_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SKX_IRP0_MSR_PMON_BOX_CTL,
	.msr_offset		= SKX_IRP_MSR_OFFSET,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &skx_uncore_format_group,
};
3980
/* PCU format attributes, including occupancy and frequency-band filters. */
static struct attribute *skx_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute_group skx_uncore_pcu_format_group = {
	.name = "format",
	.attrs = skx_uncore_pcu_formats_attr,
};

/* PCU ops: IVB-EP common MSR ops plus HSW-EP/SNB-EP filter handling. */
static struct intel_uncore_ops skx_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* SKX PCU (power control unit) PMON type. */
static struct intel_uncore_type skx_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &skx_uncore_pcu_ops,
	.format_group		= &skx_uncore_pcu_format_group,
};
4021
/* All MSR-accessed SKX uncore types, registered by skx_uncore_cpu_init(). */
static struct intel_uncore_type *skx_msr_uncores[] = {
	&skx_uncore_ubox,
	&skx_uncore_chabox,
	&skx_uncore_iio,
	&skx_uncore_iio_free_running,
	&skx_uncore_irp,
	&skx_uncore_pcu,
	NULL,
};
4031
4032 /*
4033  * To determine the number of CHAs, it should read bits 27:0 in the CAPID6
4034  * register which located at Device 30, Function 3, Offset 0x9C. PCI ID 0x2083.
4035  */
4036 #define SKX_CAPID6              0x9c
4037 #define SKX_CHA_BIT_MASK        GENMASK(27, 0)
4038
4039 static int skx_count_chabox(void)
4040 {
4041         struct pci_dev *dev = NULL;
4042         u32 val = 0;
4043
4044         dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
4045         if (!dev)
4046                 goto out;
4047
4048         pci_read_config_dword(dev, SKX_CAPID6, &val);
4049         val &= SKX_CHA_BIT_MASK;
4050 out:
4051         pci_dev_put(dev);
4052         return hweight32(val);
4053 }
4054
/* Register the SKX MSR-based uncore PMUs; the CHA count comes from CAPID6. */
void skx_uncore_cpu_init(void)
{
	skx_uncore_chabox.num_boxes = skx_count_chabox();
	uncore_msr_uncores = skx_msr_uncores;
}
4060
/* SKX IMC (memory controller channel) PMON type, PCI-accessed. */
static struct intel_uncore_type skx_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};

/* UPI uses the extended umask field, hence its own format group. */
static struct attribute *skx_upi_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group skx_upi_uncore_format_group = {
	.name = "format",
	.attrs = skx_upi_uncore_formats_attr,
};
4091
/*
 * Reset a UPI PMON box.  The UPI boxes use 8-byte-spaced control
 * registers, flagged here so the generic PCI accessors offset correctly.
 */
static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}

static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
	.init_box	= skx_upi_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

/* SKX UPI (inter-socket link) PMON type, PCI-accessed. */
static struct intel_uncore_type skx_uncore_upi = {
	.name		= "upi",
	.num_counters	= 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_UPI_PCI_PMON_CTR0,
	.event_ctl	= SKX_UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SKX_UPI_CTL_UMASK_EXT,
	.box_ctl	= SKX_UPI_PCI_PMON_BOX_CTL,
	.ops		= &skx_upi_uncore_pci_ops,
	.format_group	= &skx_upi_uncore_format_group,
};
4122
/* Reset an M2M PMON box; like UPI, M2M uses 8-byte-spaced control regs. */
static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}

static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
	.init_box	= skx_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

/* SKX M2M (mesh-to-memory) PMON type, PCI-accessed. */
static struct intel_uncore_type skx_uncore_m2m = {
	.name		= "m2m",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_M2M_PCI_PMON_CTR0,
	.event_ctl	= SKX_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SKX_M2M_PCI_PMON_BOX_CTL,
	.ops		= &skx_m2m_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
4152
static struct event_constraint skx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};

/* SKX M2PCIe (mesh-to-PCIe) PMON type, PCI-accessed. */
static struct intel_uncore_type skx_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m2pcie_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};

static struct event_constraint skx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
	EVENT_CONSTRAINT_END
};

/* SKX M3UPI (UPI mesh interface) PMON type, PCI-accessed. */
static struct intel_uncore_type skx_uncore_m3upi = {
	.name		= "m3upi",
	.num_counters	= 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m3upi_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
4197
/* Indices into skx_pci_uncores[], referenced by the PCI ID table below. */
enum {
	SKX_PCI_UNCORE_IMC,
	SKX_PCI_UNCORE_M2M,
	SKX_PCI_UNCORE_UPI,
	SKX_PCI_UNCORE_M2PCIE,
	SKX_PCI_UNCORE_M3UPI,
};

static struct intel_uncore_type *skx_pci_uncores[] = {
	[SKX_PCI_UNCORE_IMC]	= &skx_uncore_imc,
	[SKX_PCI_UNCORE_M2M]	= &skx_uncore_m2m,
	[SKX_PCI_UNCORE_UPI]	= &skx_uncore_upi,
	[SKX_PCI_UNCORE_M2PCIE]	= &skx_uncore_m2pcie,
	[SKX_PCI_UNCORE_M3UPI]	= &skx_uncore_m3upi,
	NULL,
};
4214
/*
 * PCI IDs of the SKX uncore devices.  driver_data encodes the fixed
 * (device, function) location plus the uncore type index and box id.
 */
static const struct pci_device_id skx_uncore_pci_ids[] = {
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
	},
	{ /* M2M0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
	},
	{ /* M2M1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
	},
	{ /* UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
	},
	{ /* UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
	},
	{ /* UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
	},
	{ /* M2PCIe 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* M2PCIe 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
	},
	{ /* M2PCIe 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
	},
	{ /* M2PCIe 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
	},
	{ /* M3UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};
4290
4291
static struct pci_driver skx_uncore_pci_driver = {
	.name		= "skx_uncore",
	.id_table	= skx_uncore_pci_ids,
};

/*
 * Initialize SKX PCI-based uncore support: build the PCI-bus to physical
 * package mapping, then publish the PCI uncore types and driver.
 * Returns 0 on success or the pci2phy mapping error.
 */
int skx_uncore_pci_init(void)
{
	/* need to double check pci address */
	int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);

	if (ret)
		return ret;

	uncore_pci_uncores = skx_pci_uncores;
	uncore_pci_driver = &skx_uncore_pci_driver;
	return 0;
}
4309
4310 /* end of SKX uncore support */
4311
4312 /* SNR uncore support */
4313
/* SNR UBox PMON type: two general counters plus a fixed UCLK counter. */
static struct intel_uncore_type snr_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= SNR_U_MSR_PMON_CTR0,
	.event_ctl		= SNR_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= SNR_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= SNR_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};

/* SNR CHA format attributes; only a TID filter is supported (filter_tid5). */
static struct attribute *snr_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext2.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid5.attr,
	NULL,
};
static const struct attribute_group snr_uncore_chabox_format_group = {
	.name = "format",
	.attrs = snr_uncore_cha_formats_attr,
};
4343
4344 static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4345 {
4346         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4347
4348         reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 +
4349                     box->pmu->type->msr_offset * box->pmu->pmu_idx;
4350         reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
4351         reg1->idx = 0;
4352
4353         return 0;
4354 }
4355
4356 static void snr_cha_enable_event(struct intel_uncore_box *box,
4357                                    struct perf_event *event)
4358 {
4359         struct hw_perf_event *hwc = &event->hw;
4360         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4361
4362         if (reg1->idx != EXTRA_REG_NONE)
4363                 wrmsrl(reg1->reg, reg1->config);
4364
4365         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
4366 }
4367
/* SNR CHA box ops: SNB-EP style MSR access with SNR-specific filter setup. */
static struct intel_uncore_ops snr_uncore_chabox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= snr_cha_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= snr_cha_hw_config,
};

/* SNR CHA (caching/home agent): 6 boxes, 4 counters each. */
static struct intel_uncore_type snr_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_CHA_MSR_PMON_CTL0,
	.perf_ctr		= SNR_CHA_MSR_PMON_CTR0,
	.box_ctl		= SNR_CHA_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
	.ops			= &snr_uncore_chabox_ops,
	.format_group		= &snr_uncore_chabox_format_group,
};
4392
/* Event-format sysfs attributes for the SNR IIO PMON. */
static struct attribute *snr_uncore_iio_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh9.attr,
	&format_attr_ch_mask2.attr,
	&format_attr_fc_mask2.attr,
	NULL,
};

static const struct attribute_group snr_uncore_iio_format_group = {
	.name = "format",
	.attrs = snr_uncore_iio_formats_attr,
};

/* SNR IIO (integrated I/O): 5 boxes, 4 counters each. */
static struct intel_uncore_type snr_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 5,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_IIO_MSR_PMON_CTL0,
	.perf_ctr		= SNR_IIO_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= SNR_IIO_MSR_PMON_BOX_CTL,
	.msr_offset		= SNR_IIO_MSR_OFFSET,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &snr_uncore_iio_format_group,
};
4423
/* SNR IRP (IIO ring port): 5 boxes, 2 counters each. */
static struct intel_uncore_type snr_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 5,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= SNR_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNR_IRP0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNR_IRP_MSR_OFFSET,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};

/* SNR M2PCIe (mesh-to-PCIe): 5 boxes, 4 counters each. */
static struct intel_uncore_type snr_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 5,
	.perf_ctr_bits	= 48,
	.event_ctl	= SNR_M2PCIE_MSR_PMON_CTL0,
	.perf_ctr	= SNR_M2PCIE_MSR_PMON_CTR0,
	.box_ctl	= SNR_M2PCIE_MSR_PMON_BOX_CTL,
	.msr_offset	= SNR_M2PCIE_MSR_OFFSET,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_format_group,
};
4451
/*
 * Set up the PCU band filter for SNR: event selects 0xb..0xe are
 * occupancy events that take a filter value from config1.
 */
static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		/*
		 * NOTE(review): SNB-EP's pcu_hw_config masks config1 with
		 * 0xff << (idx * 8); here the shift is by idx alone.
		 * Presumably intentional for the SNR filter layout — confirm
		 * against the SNR uncore PMON reference.
		 */
		reg1->config = event->attr.config1 & (0xff << reg1->idx);
	}
	return 0;
}
4465
/* SNR PCU ops: IVB-EP MSR access plus SNR band-filter and shared-reg constraints. */
static struct intel_uncore_ops snr_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snr_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* SNR PCU (power control unit): 1 box, 4 counters, 1 shared filter register. */
static struct intel_uncore_type snr_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNR_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNR_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNR_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snr_uncore_pcu_ops,
	.format_group		= &skx_uncore_pcu_format_group,
};
4486
/* Free-running counter groups exposed by the SNR IIO boxes. */
enum perf_uncore_snr_iio_freerunning_type_id {
	SNR_IIO_MSR_IOCLK,
	SNR_IIO_MSR_BW_IN,

	SNR_IIO_FREERUNNING_TYPE_MAX,
};

/* { first counter MSR, box offset, counter offset, #counters, width } */
static struct freerunning_counters snr_iio_freerunning[] = {
	[SNR_IIO_MSR_IOCLK]	= { 0x1eac, 0x1, 0x10, 1, 48 },
	[SNR_IIO_MSR_BW_IN]	= { 0x1f00, 0x1, 0x10, 8, 48 },
};

static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
	{ /* end: all zeroes */ },
};

/* SNR IIO free-running counters: 1 ioclk + 8 bandwidth-in per box. */
static struct intel_uncore_type snr_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 9,
	.num_boxes		= 5,
	.num_freerunning_types	= SNR_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= snr_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= snr_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
4540
/* All MSR-based SNR uncore PMU types, NULL-terminated. */
static struct intel_uncore_type *snr_msr_uncores[] = {
	&snr_uncore_ubox,
	&snr_uncore_chabox,
	&snr_uncore_iio,
	&snr_uncore_irp,
	&snr_uncore_m2pcie,
	&snr_uncore_pcu,
	&snr_uncore_iio_free_running,
	NULL,
};

/* Register the SNR MSR-based uncore PMUs with the core uncore driver. */
void snr_uncore_cpu_init(void)
{
	uncore_msr_uncores = snr_msr_uncores;
}
4556
4557 static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4558 {
4559         struct pci_dev *pdev = box->pci_dev;
4560         int box_ctl = uncore_pci_box_ctl(box);
4561
4562         __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4563         pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT);
4564 }
4565
/* SNR M2M box ops: SNB-EP PCI access with the SNR-specific box init. */
static struct intel_uncore_ops snr_m2m_uncore_pci_ops = {
	.init_box	= snr_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

/* Event-format sysfs attributes for the SNR M2M PMON. */
static struct attribute *snr_m2m_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext3.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group snr_m2m_uncore_format_group = {
	.name = "format",
	.attrs = snr_m2m_uncore_formats_attr,
};

/* SNR M2M (mesh-to-memory): 1 box, 4 counters, accessed via PCI config. */
static struct intel_uncore_type snr_uncore_m2m = {
	.name		= "m2m",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
	.ops		= &snr_m2m_uncore_pci_ops,
	.format_group	= &snr_m2m_uncore_format_group,
};
4602
/*
 * Enable a PCIe3 event. The control register is 64 bits wide but PCI
 * config writes here are 32 bits, so write the low dword (which carries
 * the enable bit) and then the high dword.
 */
static void snr_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, (u32)(hwc->config | SNBEP_PMON_CTL_EN));
	pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
}
4611
/* SNR PCIe3 box ops: like M2M but with a 64-bit event-control write. */
static struct intel_uncore_ops snr_pcie3_uncore_pci_ops = {
	.init_box	= snr_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snr_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

/* SNR PCIe3 root port PMON: 1 box, 4 counters; reuses the SKX IIO format. */
static struct intel_uncore_type snr_uncore_pcie3 = {
	.name		= "pcie3",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_PCIE3_PCI_PMON_CTR0,
	.event_ctl	= SNR_PCIE3_PCI_PMON_CTL0,
	.event_mask	= SKX_IIO_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl	= SNR_PCIE3_PCI_PMON_BOX_CTL,
	.ops		= &snr_pcie3_uncore_pci_ops,
	.format_group	= &skx_uncore_iio_format_group,
};
4634
/* Indices into snr_pci_uncores[], encoded in the PCI driver_data below. */
enum {
	SNR_PCI_UNCORE_M2M,
	SNR_PCI_UNCORE_PCIE3,
};

static struct intel_uncore_type *snr_pci_uncores[] = {
	[SNR_PCI_UNCORE_M2M]		= &snr_uncore_m2m,
	[SNR_PCI_UNCORE_PCIE3]		= &snr_uncore_pcie3,
	NULL,
};

/* PMON devices claimed by the main SNR uncore PCI driver. */
static const struct pci_device_id snr_uncore_pci_ids[] = {
	{ /* M2M */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver snr_uncore_pci_driver = {
	.name		= "snr_uncore",
	.id_table	= snr_uncore_pci_ids,
};

/* Devices owned by another driver; matched via the sub-driver path. */
static const struct pci_device_id snr_uncore_pci_sub_ids[] = {
	{ /* PCIe3 RP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver snr_uncore_pci_sub_driver = {
	.name		= "snr_uncore_sub",
	.id_table	= snr_uncore_pci_sub_ids,
};
4671
4672 int snr_uncore_pci_init(void)
4673 {
4674         /* SNR UBOX DID */
4675         int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID,
4676                                          SKX_GIDNIDMAP, true);
4677
4678         if (ret)
4679                 return ret;
4680
4681         uncore_pci_uncores = snr_pci_uncores;
4682         uncore_pci_driver = &snr_uncore_pci_driver;
4683         uncore_pci_sub_driver = &snr_uncore_pci_sub_driver;
4684         return 0;
4685 }
4686
4687 static struct pci_dev *snr_uncore_get_mc_dev(int id)
4688 {
4689         struct pci_dev *mc_dev = NULL;
4690         int pkg;
4691
4692         while (1) {
4693                 mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3451, mc_dev);
4694                 if (!mc_dev)
4695                         break;
4696                 pkg = uncore_pcibus_to_dieid(mc_dev->bus);
4697                 if (pkg == id)
4698                         break;
4699         }
4700         return mc_dev;
4701 }
4702
4703 static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
4704                                        unsigned int box_ctl, int mem_offset)
4705 {
4706         struct pci_dev *pdev = snr_uncore_get_mc_dev(box->dieid);
4707         struct intel_uncore_type *type = box->pmu->type;
4708         resource_size_t addr;
4709         u32 pci_dword;
4710
4711         if (!pdev)
4712                 return;
4713
4714         pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
4715         addr = (pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
4716
4717         pci_read_config_dword(pdev, mem_offset, &pci_dword);
4718         addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;
4719
4720         addr += box_ctl;
4721
4722         box->io_addr = ioremap(addr, type->mmio_map_size);
4723         if (!box->io_addr) {
4724                 pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
4725                 return;
4726         }
4727
4728         writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
4729 }
4730
/* Init an SNR IMC box using its default box control and MEM0 offset. */
static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
{
	__snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box),
				   SNR_IMC_MMIO_MEM0_OFFSET);
}
4736
4737 static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
4738 {
4739         u32 config;
4740
4741         if (!box->io_addr)
4742                 return;
4743
4744         config = readl(box->io_addr);
4745         config |= SNBEP_PMON_BOX_CTL_FRZ;
4746         writel(config, box->io_addr);
4747 }
4748
4749 static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box)
4750 {
4751         u32 config;
4752
4753         if (!box->io_addr)
4754                 return;
4755
4756         config = readl(box->io_addr);
4757         config &= ~SNBEP_PMON_BOX_CTL_FRZ;
4758         writel(config, box->io_addr);
4759 }
4760
4761 static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box,
4762                                            struct perf_event *event)
4763 {
4764         struct hw_perf_event *hwc = &event->hw;
4765
4766         if (!box->io_addr)
4767                 return;
4768
4769         if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
4770                 return;
4771
4772         writel(hwc->config | SNBEP_PMON_CTL_EN,
4773                box->io_addr + hwc->config_base);
4774 }
4775
4776 static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box,
4777                                             struct perf_event *event)
4778 {
4779         struct hw_perf_event *hwc = &event->hw;
4780
4781         if (!box->io_addr)
4782                 return;
4783
4784         if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
4785                 return;
4786
4787         writel(hwc->config, box->io_addr + hwc->config_base);
4788 }
4789
/* MMIO access ops for the SNR IMC PMON boxes. */
static struct intel_uncore_ops snr_uncore_mmio_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= snr_uncore_mmio_disable_box,
	.enable_box	= snr_uncore_mmio_enable_box,
	.disable_event	= snr_uncore_mmio_disable_event,
	.enable_event	= snr_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};

/* Predefined IMC events; CAS count scale converts 64B lines to MiB. */
static struct uncore_event_desc snr_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
4810
/* SNR IMC (integrated memory controller): 2 boxes, MMIO-accessed PMON. */
static struct intel_uncore_type snr_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs	= snr_uncore_imc_events,
	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
	.ops		= &snr_uncore_mmio_ops,
	.format_group	= &skx_uncore_format_group,
};
4829
/* Free-running counter groups exposed by the SNR IMC. */
enum perf_uncore_snr_imc_freerunning_type_id {
	SNR_IMC_DCLK,
	SNR_IMC_DDR,

	SNR_IMC_FREERUNNING_TYPE_MAX,
};

/* { first counter MSR, box offset, counter offset, #counters, width } */
static struct freerunning_counters snr_imc_freerunning[] = {
	[SNR_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
	[SNR_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
};

/* DDR read/write scale converts 64B transactions to MiB. */
static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,		"event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(read,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(read.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(read.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(write,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(write.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(write.unit,	"MiB"),
	{ /* end: all zeroes */ },
};

/* Free-running counters need no enable/disable, only map/read. */
static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};
4860
/* SNR IMC free-running counters: 1 dclk + 2 DDR bandwidth counters. */
static struct intel_uncore_type snr_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.num_boxes		= 1,
	.num_freerunning_types	= SNR_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.freerunning		= snr_imc_freerunning,
	.ops			= &snr_uncore_imc_freerunning_ops,
	.event_descs		= snr_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

/* All MMIO-based SNR uncore PMU types, NULL-terminated. */
static struct intel_uncore_type *snr_mmio_uncores[] = {
	&snr_uncore_imc,
	&snr_uncore_imc_free_running,
	NULL,
};

/* Register the SNR MMIO-based uncore PMUs with the core uncore driver. */
void snr_uncore_mmio_init(void)
{
	uncore_mmio_uncores = snr_mmio_uncores;
}
4883
4884 /* end of SNR uncore support */
4885
4886 /* ICX uncore support */
4887
/* Per-CHA MSR offsets on ICX; indexed by pmu_idx (offsets are non-linear). */
static unsigned icx_cha_msr_offsets[] = {
	0x2a0, 0x2ae, 0x2bc, 0x2ca, 0x2d8, 0x2e6, 0x2f4, 0x302, 0x310,
	0x31e, 0x32c, 0x33a, 0x348, 0x356, 0x364, 0x372, 0x380, 0x38e,
	0x3aa, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f0, 0x3fe, 0x40c, 0x41a,
	0x428, 0x436, 0x444, 0x452, 0x460, 0x46e, 0x47c, 0x0,   0xe,
	0x1c,  0x2a,  0x38,  0x46,
};
4895
4896 static int icx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4897 {
4898         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4899         bool tie_en = !!(event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN);
4900
4901         if (tie_en) {
4902                 reg1->reg = ICX_C34_MSR_PMON_BOX_FILTER0 +
4903                             icx_cha_msr_offsets[box->pmu->pmu_idx];
4904                 reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
4905                 reg1->idx = 0;
4906         }
4907
4908         return 0;
4909 }
4910
/* ICX CHA box ops: same enable path as SNR, with ICX filter setup. */
static struct intel_uncore_ops icx_uncore_chabox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= snr_cha_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= icx_cha_hw_config,
};

/* ICX CHA: num_boxes is filled in at runtime by icx_uncore_cpu_init(). */
static struct intel_uncore_type icx_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_C34_MSR_PMON_CTL0,
	.perf_ctr		= ICX_C34_MSR_PMON_CTR0,
	.box_ctl		= ICX_C34_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_cha_msr_offsets,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &icx_uncore_chabox_ops,
	.format_group		= &snr_uncore_chabox_format_group,
};
4935
/* Per-box MSR offsets shared by the ICX IIO, IRP and M2PCIe types. */
static unsigned icx_msr_offsets[] = {
	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
};

/* ICX IIO events restricted to specific counter subsets. */
static struct event_constraint icx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x03, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	EVENT_CONSTRAINT_END
};
4948
/* ICX IIO (integrated I/O): 6 boxes, 4 counters each. */
static struct intel_uncore_type icx_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_IIO_MSR_PMON_CTL0,
	.perf_ctr		= ICX_IIO_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= ICX_IIO_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_msr_offsets,
	.constraints		= icx_uncore_iio_constraints,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &snr_uncore_iio_format_group,
};

/* ICX IRP (IIO ring port): 6 boxes, 2 counters each. */
static struct intel_uncore_type icx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= ICX_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= ICX_IRP0_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_msr_offsets,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};
4978
/* ICX M2PCIe events restricted to counters 0-1. */
static struct event_constraint icx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};

/* ICX M2PCIe (mesh-to-PCIe): 6 boxes, 4 counters each. */
static struct intel_uncore_type icx_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.event_ctl	= ICX_M2PCIE_MSR_PMON_CTL0,
	.perf_ctr	= ICX_M2PCIE_MSR_PMON_CTR0,
	.box_ctl	= ICX_M2PCIE_MSR_PMON_BOX_CTL,
	.msr_offsets	= icx_msr_offsets,
	.constraints	= icx_uncore_m2pcie_constraints,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_format_group,
};
5000
/* Free-running counter groups exposed by the ICX IIO boxes. */
enum perf_uncore_icx_iio_freerunning_type_id {
	ICX_IIO_MSR_IOCLK,
	ICX_IIO_MSR_BW_IN,

	ICX_IIO_FREERUNNING_TYPE_MAX,
};

/* Per-box MSR offsets differ between the clock and bandwidth counters. */
static unsigned icx_iio_clk_freerunning_box_offsets[] = {
	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
};

static unsigned icx_iio_bw_freerunning_box_offsets[] = {
	0x0, 0x10, 0x20, 0x90, 0xa0, 0xb0,
};

/* { base MSR, box offset, counter offset, #counters, width, box offset table } */
static struct freerunning_counters icx_iio_freerunning[] = {
	[ICX_IIO_MSR_IOCLK]	= { 0xa55, 0x1, 0x20, 1, 48, icx_iio_clk_freerunning_box_offsets },
	[ICX_IIO_MSR_BW_IN]	= { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
};
5020
static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
	{ /* end: all zeroes */ },
};

/* ICX IIO free-running counters: 1 ioclk + 8 bandwidth-in per box. */
static struct intel_uncore_type icx_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 9,
	.num_boxes		= 6,
	.num_freerunning_types	= ICX_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= icx_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= icx_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
5062
/* All MSR-based ICX uncore PMU types; Ubox and PCU are reused from SKX. */
static struct intel_uncore_type *icx_msr_uncores[] = {
	&skx_uncore_ubox,
	&icx_uncore_chabox,
	&icx_uncore_iio,
	&icx_uncore_irp,
	&icx_uncore_m2pcie,
	&skx_uncore_pcu,
	&icx_uncore_iio_free_running,
	NULL,
};
5073
/*
 * To determine the number of CHAs, read the CAPID6 (low) and CAPID7 (high)
 * registers, which are located at Device 30, Function 3.
 */
5078 #define ICX_CAPID6              0x9c
5079 #define ICX_CAPID7              0xa0
5080
5081 static u64 icx_count_chabox(void)
5082 {
5083         struct pci_dev *dev = NULL;
5084         u64 caps = 0;
5085
5086         dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x345b, dev);
5087         if (!dev)
5088                 goto out;
5089
5090         pci_read_config_dword(dev, ICX_CAPID6, (u32 *)&caps);
5091         pci_read_config_dword(dev, ICX_CAPID7, (u32 *)&caps + 1);
5092 out:
5093         pci_dev_put(dev);
5094         return hweight64(caps);
5095 }
5096
/* Register the ICX MSR-based uncore PMUs; CHA count is probed at runtime. */
void icx_uncore_cpu_init(void)
{
	u64 num_boxes = icx_count_chabox();

	/* The static offset table bounds how many CHAs can be addressed. */
	if (WARN_ON(num_boxes > ARRAY_SIZE(icx_cha_msr_offsets)))
		return;
	icx_uncore_chabox.num_boxes = num_boxes;
	uncore_msr_uncores = icx_msr_uncores;
}
5106
/*
 * ICX M2M PMON: register layout and ops are shared with SNR; only the
 * box count differs (four M2M blocks on ICX).
 */
static struct intel_uncore_type icx_uncore_m2m = {
	.name		= "m2m",
	.num_counters	= 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
	.ops		= &snr_m2m_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
5119
/*
 * sysfs format attributes for the ICX UPI PMON; umask_ext4 exposes the
 * extended umask bits (ICX_UPI_CTL_UMASK_EXT) specific to this unit.
 */
static struct attribute *icx_upi_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext4.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
5128
/* "format" sysfs group for the ICX UPI PMU. */
static const struct attribute_group icx_upi_uncore_format_group = {
	.name = "format",
	.attrs = icx_upi_uncore_formats_attr,
};
5133
/*
 * ICX UPI PMON: three UPI links, each with four 48-bit counters. Uses
 * the SKX UPI PCI ops but an ICX-specific register layout and extended
 * umask (event_mask_ext).
 */
static struct intel_uncore_type icx_uncore_upi = {
	.name		= "upi",
	.num_counters	= 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= ICX_UPI_PCI_PMON_CTR0,
	.event_ctl	= ICX_UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= ICX_UPI_CTL_UMASK_EXT,
	.box_ctl	= ICX_UPI_PCI_PMON_BOX_CTL,
	.ops		= &skx_upi_uncore_pci_ops,
	.format_group	= &icx_upi_uncore_format_group,
};
5147
/*
 * M3UPI counter constraints: events 0x1c-0x1f may only run on counter 0;
 * events 0x40 and 0x4e-0x50 are restricted to counters 0-2.
 */
static struct event_constraint icx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1c, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
	EVENT_CONSTRAINT_END
};
5159
/*
 * ICX M3UPI PMON: one box per UPI link. Reuses the generic IVB-EP PCI
 * ops; counter scheduling is restricted by icx_uncore_m3upi_constraints.
 */
static struct intel_uncore_type icx_uncore_m3upi = {
	.name		= "m3upi",
	.num_counters	= 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= ICX_M3UPI_PCI_PMON_CTR0,
	.event_ctl	= ICX_M3UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= ICX_M3UPI_PCI_PMON_BOX_CTL,
	.constraints	= icx_uncore_m3upi_constraints,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
5173
/* Indices into icx_pci_uncores[], referenced by icx_uncore_pci_ids[]. */
enum {
	ICX_PCI_UNCORE_M2M,
	ICX_PCI_UNCORE_UPI,
	ICX_PCI_UNCORE_M3UPI,
};
5179
/* PCI-based uncore PMU types on ICX; NULL-terminated for the core code. */
static struct intel_uncore_type *icx_pci_uncores[] = {
	[ICX_PCI_UNCORE_M2M]		= &icx_uncore_m2m,
	[ICX_PCI_UNCORE_UPI]		= &icx_uncore_upi,
	[ICX_PCI_UNCORE_M3UPI]		= &icx_uncore_m3upi,
	NULL,
};
5186
/*
 * PCI IDs of the ICX uncore PMON devices. driver_data encodes
 * (PCI device number, function, type index, box index) via
 * UNCORE_PCI_DEV_FULL_DATA; the type index selects an entry in
 * icx_pci_uncores[].
 */
static const struct pci_device_id icx_uncore_pci_ids[] = {
	{ /* M2M 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, ICX_PCI_UNCORE_M2M, 0),
	},
	{ /* M2M 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 0, ICX_PCI_UNCORE_M2M, 1),
	},
	{ /* M2M 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, ICX_PCI_UNCORE_M2M, 2),
	},
	{ /* M2M 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, ICX_PCI_UNCORE_M2M, 3),
	},
	{ /* UPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(2, 1, ICX_PCI_UNCORE_UPI, 0),
	},
	{ /* UPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(3, 1, ICX_PCI_UNCORE_UPI, 1),
	},
	{ /* UPI Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 1, ICX_PCI_UNCORE_UPI, 2),
	},
	{ /* M3UPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(5, 1, ICX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(6, 1, ICX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(7, 1, ICX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};
5230
/*
 * PCI driver stub with an id_table only; no probe/remove callbacks here —
 * presumably the generic uncore PCI code drives these devices (as with
 * the other *_uncore_pci_driver instances in this file).
 */
static struct pci_driver icx_uncore_pci_driver = {
	.name		= "icx_uncore",
	.id_table	= icx_uncore_pci_ids,
};
5235
5236 int icx_uncore_pci_init(void)
5237 {
5238         /* ICX UBOX DID */
5239         int ret = snbep_pci2phy_map_init(0x3450, SKX_CPUNODEID,
5240                                          SKX_GIDNIDMAP, true);
5241
5242         if (ret)
5243                 return ret;
5244
5245         uncore_pci_uncores = icx_pci_uncores;
5246         uncore_pci_driver = &icx_uncore_pci_driver;
5247         return 0;
5248 }
5249
5250 static void icx_uncore_imc_init_box(struct intel_uncore_box *box)
5251 {
5252         unsigned int box_ctl = box->pmu->type->box_ctl +
5253                                box->pmu->type->mmio_offset * (box->pmu->pmu_idx % ICX_NUMBER_IMC_CHN);
5254         int mem_offset = (box->pmu->pmu_idx / ICX_NUMBER_IMC_CHN) * ICX_IMC_MEM_STRIDE +
5255                          SNR_IMC_MMIO_MEM0_OFFSET;
5256
5257         __snr_uncore_mmio_init_box(box, box_ctl, mem_offset);
5258 }
5259
/*
 * MMIO PMON ops for the ICX IMC: only init_box is ICX-specific (channel
 * and IMC decoding); everything else is shared with SNR/generic code.
 */
static struct intel_uncore_ops icx_uncore_mmio_ops = {
	.init_box	= icx_uncore_imc_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= snr_uncore_mmio_disable_box,
	.enable_box	= snr_uncore_mmio_enable_box,
	.disable_event	= snr_uncore_mmio_disable_event,
	.enable_event	= snr_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};
5269
/*
 * ICX IMC PMON: eight boxes (channel-per-IMC layout, see
 * icx_uncore_imc_init_box), four 48-bit general counters plus one fixed
 * counter per box. Register layout is shared with SNR.
 */
static struct intel_uncore_type icx_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
	.ops		= &icx_uncore_mmio_ops,
	.format_group	= &skx_uncore_format_group,
};
5288
/* Free-running counter groups of the ICX IMC; index icx_imc_freerunning[]. */
enum perf_uncore_icx_imc_freerunning_type_id {
	ICX_IMC_DCLK,
	ICX_IMC_DDR,
	ICX_IMC_DDRT,

	ICX_IMC_FREERUNNING_TYPE_MAX,
};
5296
/*
 * ICX IMC free-running counter layout. Initializer order follows
 * struct freerunning_counters — presumably { counter_base, counter_offset,
 * box_offset, num_counters, bits }; verify against uncore.h.
 */
static struct freerunning_counters icx_imc_freerunning[] = {
	[ICX_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
	[ICX_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
	[ICX_IMC_DDRT]	= { 0x22a0, 0x8, 0, 2, 48 },
};
5302
/*
 * Event descriptors for the ICX IMC free-running counters: DCLK ticks,
 * DDR read/write and DDRT (persistent memory) read/write bandwidth.
 * The 6.103515625e-5 scale equals 64/2^20, which suggests the counters
 * tick in 64-byte units and are reported in MiB — confirm against the
 * uncore PMON reference.
 */
static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,			"event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(read,			"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(read.scale,		"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(read.unit,		"MiB"),
	INTEL_UNCORE_EVENT_DESC(write,			"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(write.scale,		"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(write.unit,		"MiB"),

	INTEL_UNCORE_EVENT_DESC(ddrt_read,		"event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(ddrt_read.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ddrt_read.unit,		"MiB"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write,		"event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write.unit,	"MiB"),
	{ /* end: all zeroes */ },
};
5321
5322 static void icx_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
5323 {
5324         int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE +
5325                          SNR_IMC_MMIO_MEM0_OFFSET;
5326
5327         __snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box), mem_offset);
5328 }
5329
/*
 * Ops for the free-running IMC counters: no enable/disable hooks since
 * the counters run unconditionally; only mapping, reading and config
 * validation are needed.
 */
static struct intel_uncore_ops icx_uncore_imc_freerunning_ops = {
	.init_box	= icx_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};
5336
/*
 * ICX IMC free-running PMU type: one box per IMC (4), five free-running
 * counters per box (1 DCLK + 2 DDR + 2 DDRT, see icx_imc_freerunning).
 */
static struct intel_uncore_type icx_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 5,
	.num_boxes		= 4,
	.num_freerunning_types	= ICX_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.freerunning		= icx_imc_freerunning,
	.ops			= &icx_uncore_imc_freerunning_ops,
	.event_descs		= icx_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
5348
/* MMIO-based uncore PMU types on ICX; NULL-terminated for the core code. */
static struct intel_uncore_type *icx_mmio_uncores[] = {
	&icx_uncore_imc,
	&icx_uncore_imc_free_running,
	NULL,
};
5354
/* Register the MMIO-based uncore PMU types for ICX. */
void icx_uncore_mmio_init(void)
{
	uncore_mmio_uncores = icx_mmio_uncores;
}
5359
5360 /* end of ICX uncore support */