Merge remote-tracking branch 'torvalds/master' into perf/core
[linux-2.6-microblaze.git] / arch / x86 / events / intel / uncore_snbep.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* SandyBridge-EP/IvyTown uncore support */
3 #include "uncore.h"
4
5 /* SNB-EP pci bus to socket mapping */
6 #define SNBEP_CPUNODEID                 0x40
7 #define SNBEP_GIDNIDMAP                 0x54
8
9 /* SNB-EP Box level control */
10 #define SNBEP_PMON_BOX_CTL_RST_CTRL     (1 << 0)
11 #define SNBEP_PMON_BOX_CTL_RST_CTRS     (1 << 1)
12 #define SNBEP_PMON_BOX_CTL_FRZ          (1 << 8)
13 #define SNBEP_PMON_BOX_CTL_FRZ_EN       (1 << 16)
14 #define SNBEP_PMON_BOX_CTL_INT          (SNBEP_PMON_BOX_CTL_RST_CTRL | \
15                                          SNBEP_PMON_BOX_CTL_RST_CTRS | \
16                                          SNBEP_PMON_BOX_CTL_FRZ_EN)
17 /* SNB-EP event control */
18 #define SNBEP_PMON_CTL_EV_SEL_MASK      0x000000ff
19 #define SNBEP_PMON_CTL_UMASK_MASK       0x0000ff00
20 #define SNBEP_PMON_CTL_RST              (1 << 17)
21 #define SNBEP_PMON_CTL_EDGE_DET         (1 << 18)
22 #define SNBEP_PMON_CTL_EV_SEL_EXT       (1 << 21)
23 #define SNBEP_PMON_CTL_EN               (1 << 22)
24 #define SNBEP_PMON_CTL_INVERT           (1 << 23)
25 #define SNBEP_PMON_CTL_TRESH_MASK       0xff000000
26 #define SNBEP_PMON_RAW_EVENT_MASK       (SNBEP_PMON_CTL_EV_SEL_MASK | \
27                                          SNBEP_PMON_CTL_UMASK_MASK | \
28                                          SNBEP_PMON_CTL_EDGE_DET | \
29                                          SNBEP_PMON_CTL_INVERT | \
30                                          SNBEP_PMON_CTL_TRESH_MASK)
31
32 /* SNB-EP Ubox event control */
33 #define SNBEP_U_MSR_PMON_CTL_TRESH_MASK         0x1f000000
34 #define SNBEP_U_MSR_PMON_RAW_EVENT_MASK         \
35                                 (SNBEP_PMON_CTL_EV_SEL_MASK | \
36                                  SNBEP_PMON_CTL_UMASK_MASK | \
37                                  SNBEP_PMON_CTL_EDGE_DET | \
38                                  SNBEP_PMON_CTL_INVERT | \
39                                  SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
40
41 #define SNBEP_CBO_PMON_CTL_TID_EN               (1 << 19)
42 #define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK       (SNBEP_PMON_RAW_EVENT_MASK | \
43                                                  SNBEP_CBO_PMON_CTL_TID_EN)
44
45 /* SNB-EP PCU event control */
46 #define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK     0x0000c000
47 #define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK       0x1f000000
48 #define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT       (1 << 30)
49 #define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET     (1 << 31)
50 #define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK       \
51                                 (SNBEP_PMON_CTL_EV_SEL_MASK | \
52                                  SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
53                                  SNBEP_PMON_CTL_EDGE_DET | \
54                                  SNBEP_PMON_CTL_INVERT | \
55                                  SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
56                                  SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
57                                  SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
58
59 #define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK       \
60                                 (SNBEP_PMON_RAW_EVENT_MASK | \
61                                  SNBEP_PMON_CTL_EV_SEL_EXT)
62
63 /* SNB-EP pci control register */
64 #define SNBEP_PCI_PMON_BOX_CTL                  0xf4
65 #define SNBEP_PCI_PMON_CTL0                     0xd8
66 /* SNB-EP pci counter register */
67 #define SNBEP_PCI_PMON_CTR0                     0xa0
68
69 /* SNB-EP home agent register */
70 #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0        0x40
71 #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1        0x44
72 #define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH       0x48
73 /* SNB-EP memory controller register */
74 #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL         0xf0
75 #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR         0xd0
76 /* SNB-EP QPI register */
77 #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0          0x228
78 #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1          0x22c
79 #define SNBEP_Q_Py_PCI_PMON_PKT_MASK0           0x238
80 #define SNBEP_Q_Py_PCI_PMON_PKT_MASK1           0x23c
81
82 /* SNB-EP Ubox register */
83 #define SNBEP_U_MSR_PMON_CTR0                   0xc16
84 #define SNBEP_U_MSR_PMON_CTL0                   0xc10
85
86 #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL         0xc08
87 #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR         0xc09
88
89 /* SNB-EP Cbo register */
90 #define SNBEP_C0_MSR_PMON_CTR0                  0xd16
91 #define SNBEP_C0_MSR_PMON_CTL0                  0xd10
92 #define SNBEP_C0_MSR_PMON_BOX_CTL               0xd04
93 #define SNBEP_C0_MSR_PMON_BOX_FILTER            0xd14
94 #define SNBEP_CBO_MSR_OFFSET                    0x20
95
96 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID       0x1f
97 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID       0x3fc00
98 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE     0x7c0000
99 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC       0xff800000
100
101 #define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {    \
102         .event = (e),                           \
103         .msr = SNBEP_C0_MSR_PMON_BOX_FILTER,    \
104         .config_mask = (m),                     \
105         .idx = (i)                              \
106 }
107
108 /* SNB-EP PCU register */
109 #define SNBEP_PCU_MSR_PMON_CTR0                 0xc36
110 #define SNBEP_PCU_MSR_PMON_CTL0                 0xc30
111 #define SNBEP_PCU_MSR_PMON_BOX_CTL              0xc24
112 #define SNBEP_PCU_MSR_PMON_BOX_FILTER           0xc34
113 #define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK      0xffffffff
114 #define SNBEP_PCU_MSR_CORE_C3_CTR               0x3fc
115 #define SNBEP_PCU_MSR_CORE_C6_CTR               0x3fd
116
117 /* IVBEP event control */
118 #define IVBEP_PMON_BOX_CTL_INT          (SNBEP_PMON_BOX_CTL_RST_CTRL | \
119                                          SNBEP_PMON_BOX_CTL_RST_CTRS)
120 #define IVBEP_PMON_RAW_EVENT_MASK               (SNBEP_PMON_CTL_EV_SEL_MASK | \
121                                          SNBEP_PMON_CTL_UMASK_MASK | \
122                                          SNBEP_PMON_CTL_EDGE_DET | \
123                                          SNBEP_PMON_CTL_TRESH_MASK)
124 /* IVBEP Ubox */
125 #define IVBEP_U_MSR_PMON_GLOBAL_CTL             0xc00
126 #define IVBEP_U_PMON_GLOBAL_FRZ_ALL             (1 << 31)
127 #define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL           (1 << 29)
128
129 #define IVBEP_U_MSR_PMON_RAW_EVENT_MASK \
130                                 (SNBEP_PMON_CTL_EV_SEL_MASK | \
131                                  SNBEP_PMON_CTL_UMASK_MASK | \
132                                  SNBEP_PMON_CTL_EDGE_DET | \
133                                  SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
134 /* IVBEP Cbo */
135 #define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK               (IVBEP_PMON_RAW_EVENT_MASK | \
136                                                  SNBEP_CBO_PMON_CTL_TID_EN)
137
138 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID               (0x1fULL << 0)
139 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK      (0xfULL << 5)
140 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE     (0x3fULL << 17)
141 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID               (0xffffULL << 32)
142 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC               (0x1ffULL << 52)
143 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6                (0x1ULL << 61)
144 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC                (0x1ULL << 62)
145 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC      (0x1ULL << 63)
146
147 /* IVBEP home agent */
148 #define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST         (1 << 16)
149 #define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK                \
150                                 (IVBEP_PMON_RAW_EVENT_MASK | \
151                                  IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
152 /* IVBEP PCU */
153 #define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK       \
154                                 (SNBEP_PMON_CTL_EV_SEL_MASK | \
155                                  SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
156                                  SNBEP_PMON_CTL_EDGE_DET | \
157                                  SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
158                                  SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
159                                  SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
160 /* IVBEP QPI */
161 #define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK       \
162                                 (IVBEP_PMON_RAW_EVENT_MASK | \
163                                  SNBEP_PMON_CTL_EV_SEL_EXT)
164
165 #define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
166                                 ((1ULL << (n)) - 1)))
167
168 /* Haswell-EP Ubox */
169 #define HSWEP_U_MSR_PMON_CTR0                   0x709
170 #define HSWEP_U_MSR_PMON_CTL0                   0x705
171 #define HSWEP_U_MSR_PMON_FILTER                 0x707
172
173 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL         0x703
174 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR         0x704
175
176 #define HSWEP_U_MSR_PMON_BOX_FILTER_TID         (0x1 << 0)
177 #define HSWEP_U_MSR_PMON_BOX_FILTER_CID         (0x1fULL << 1)
178 #define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
179                                         (HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
180                                          HSWEP_U_MSR_PMON_BOX_FILTER_CID)
181
182 /* Haswell-EP CBo */
183 #define HSWEP_C0_MSR_PMON_CTR0                  0xe08
184 #define HSWEP_C0_MSR_PMON_CTL0                  0xe01
185 #define HSWEP_C0_MSR_PMON_BOX_CTL                       0xe00
186 #define HSWEP_C0_MSR_PMON_BOX_FILTER0           0xe05
187 #define HSWEP_CBO_MSR_OFFSET                    0x10
188
189
190 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID               (0x3fULL << 0)
191 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK      (0xfULL << 6)
192 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE     (0x7fULL << 17)
193 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID               (0xffffULL << 32)
194 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC               (0x1ffULL << 52)
195 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6                (0x1ULL << 61)
196 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC                (0x1ULL << 62)
197 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC      (0x1ULL << 63)
198
199
200 /* Haswell-EP Sbox */
201 #define HSWEP_S0_MSR_PMON_CTR0                  0x726
202 #define HSWEP_S0_MSR_PMON_CTL0                  0x721
203 #define HSWEP_S0_MSR_PMON_BOX_CTL                       0x720
204 #define HSWEP_SBOX_MSR_OFFSET                   0xa
205 #define HSWEP_S_MSR_PMON_RAW_EVENT_MASK         (SNBEP_PMON_RAW_EVENT_MASK | \
206                                                  SNBEP_CBO_PMON_CTL_TID_EN)
207
208 /* Haswell-EP PCU */
209 #define HSWEP_PCU_MSR_PMON_CTR0                 0x717
210 #define HSWEP_PCU_MSR_PMON_CTL0                 0x711
211 #define HSWEP_PCU_MSR_PMON_BOX_CTL              0x710
212 #define HSWEP_PCU_MSR_PMON_BOX_FILTER           0x715
213
214 /* KNL Ubox */
215 #define KNL_U_MSR_PMON_RAW_EVENT_MASK \
216                                         (SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
217                                                 SNBEP_CBO_PMON_CTL_TID_EN)
218 /* KNL CHA */
219 #define KNL_CHA_MSR_OFFSET                      0xc
220 #define KNL_CHA_MSR_PMON_CTL_QOR                (1 << 16)
221 #define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
222                                         (SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
223                                          KNL_CHA_MSR_PMON_CTL_QOR)
224 #define KNL_CHA_MSR_PMON_BOX_FILTER_TID         0x1ff
225 #define KNL_CHA_MSR_PMON_BOX_FILTER_STATE       (7 << 18)
226 #define KNL_CHA_MSR_PMON_BOX_FILTER_OP          (0xfffffe2aULL << 32)
227 #define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE (0x1ULL << 32)
228 #define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE  (0x1ULL << 33)
229 #define KNL_CHA_MSR_PMON_BOX_FILTER_NNC         (0x1ULL << 37)
230
231 /* KNL EDC/MC UCLK */
232 #define KNL_UCLK_MSR_PMON_CTR0_LOW              0x400
233 #define KNL_UCLK_MSR_PMON_CTL0                  0x420
234 #define KNL_UCLK_MSR_PMON_BOX_CTL               0x430
235 #define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW        0x44c
236 #define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL        0x454
237 #define KNL_PMON_FIXED_CTL_EN                   0x1
238
239 /* KNL EDC */
240 #define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW         0xa00
241 #define KNL_EDC0_ECLK_MSR_PMON_CTL0             0xa20
242 #define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL          0xa30
243 #define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW   0xa3c
244 #define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL   0xa44
245
246 /* KNL MC */
247 #define KNL_MC0_CH0_MSR_PMON_CTR0_LOW           0xb00
248 #define KNL_MC0_CH0_MSR_PMON_CTL0               0xb20
249 #define KNL_MC0_CH0_MSR_PMON_BOX_CTL            0xb30
250 #define KNL_MC0_CH0_MSR_PMON_FIXED_LOW          0xb3c
251 #define KNL_MC0_CH0_MSR_PMON_FIXED_CTL          0xb44
252
253 /* KNL IRP */
254 #define KNL_IRP_PCI_PMON_BOX_CTL                0xf0
255 #define KNL_IRP_PCI_PMON_RAW_EVENT_MASK         (SNBEP_PMON_RAW_EVENT_MASK | \
256                                                  KNL_CHA_MSR_PMON_CTL_QOR)
257 /* KNL PCU */
258 #define KNL_PCU_PMON_CTL_EV_SEL_MASK            0x0000007f
259 #define KNL_PCU_PMON_CTL_USE_OCC_CTR            (1 << 7)
260 #define KNL_PCU_MSR_PMON_CTL_TRESH_MASK         0x3f000000
261 #define KNL_PCU_MSR_PMON_RAW_EVENT_MASK \
262                                 (KNL_PCU_PMON_CTL_EV_SEL_MASK | \
263                                  KNL_PCU_PMON_CTL_USE_OCC_CTR | \
264                                  SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
265                                  SNBEP_PMON_CTL_EDGE_DET | \
266                                  SNBEP_CBO_PMON_CTL_TID_EN | \
267                                  SNBEP_PMON_CTL_INVERT | \
268                                  KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
269                                  SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
270                                  SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
271
272 /* SKX pci bus to socket mapping */
273 #define SKX_CPUNODEID                   0xc0
274 #define SKX_GIDNIDMAP                   0xd4
275
276 /*
277  * The CPU_BUS_NUMBER MSR returns the values of the respective CPUBUSNO CSR
278  * that BIOS programmed. MSR has package scope.
279  * |  Bit  |  Default  |  Description
280  * | [63]  |    00h    | VALID - When set, indicates the CPU bus
281  *                       numbers have been initialized. (RO)
282  * |[62:48]|    ---    | Reserved
283  * |[47:40]|    00h    | BUS_NUM_5 - Return the bus number BIOS assigned
284  *                       CPUBUSNO(5). (RO)
285  * |[39:32]|    00h    | BUS_NUM_4 - Return the bus number BIOS assigned
286  *                       CPUBUSNO(4). (RO)
287  * |[31:24]|    00h    | BUS_NUM_3 - Return the bus number BIOS assigned
288  *                       CPUBUSNO(3). (RO)
289  * |[23:16]|    00h    | BUS_NUM_2 - Return the bus number BIOS assigned
290  *                       CPUBUSNO(2). (RO)
291  * |[15:8] |    00h    | BUS_NUM_1 - Return the bus number BIOS assigned
292  *                       CPUBUSNO(1). (RO)
293  * | [7:0] |    00h    | BUS_NUM_0 - Return the bus number BIOS assigned
294  *                       CPUBUSNO(0). (RO)
295  */
296 #define SKX_MSR_CPU_BUS_NUMBER          0x300
297 #define SKX_MSR_CPU_BUS_VALID_BIT       (1ULL << 63)
298 #define BUS_NUM_STRIDE                  8
299
300 /* SKX CHA */
301 #define SKX_CHA_MSR_PMON_BOX_FILTER_TID         (0x1ffULL << 0)
302 #define SKX_CHA_MSR_PMON_BOX_FILTER_LINK        (0xfULL << 9)
303 #define SKX_CHA_MSR_PMON_BOX_FILTER_STATE       (0x3ffULL << 17)
304 #define SKX_CHA_MSR_PMON_BOX_FILTER_REM         (0x1ULL << 32)
305 #define SKX_CHA_MSR_PMON_BOX_FILTER_LOC         (0x1ULL << 33)
306 #define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC     (0x1ULL << 35)
307 #define SKX_CHA_MSR_PMON_BOX_FILTER_NM          (0x1ULL << 36)
308 #define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM      (0x1ULL << 37)
309 #define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0        (0x3ffULL << 41)
310 #define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1        (0x3ffULL << 51)
311 #define SKX_CHA_MSR_PMON_BOX_FILTER_C6          (0x1ULL << 61)
312 #define SKX_CHA_MSR_PMON_BOX_FILTER_NC          (0x1ULL << 62)
313 #define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC        (0x1ULL << 63)
314
315 /* SKX IIO */
316 #define SKX_IIO0_MSR_PMON_CTL0          0xa48
317 #define SKX_IIO0_MSR_PMON_CTR0          0xa41
318 #define SKX_IIO0_MSR_PMON_BOX_CTL       0xa40
319 #define SKX_IIO_MSR_OFFSET              0x20
320
321 #define SKX_PMON_CTL_TRESH_MASK         (0xff << 24)
322 #define SKX_PMON_CTL_TRESH_MASK_EXT     (0xf)
323 #define SKX_PMON_CTL_CH_MASK            (0xff << 4)
324 #define SKX_PMON_CTL_FC_MASK            (0x7 << 12)
325 #define SKX_IIO_PMON_RAW_EVENT_MASK     (SNBEP_PMON_CTL_EV_SEL_MASK | \
326                                          SNBEP_PMON_CTL_UMASK_MASK | \
327                                          SNBEP_PMON_CTL_EDGE_DET | \
328                                          SNBEP_PMON_CTL_INVERT | \
329                                          SKX_PMON_CTL_TRESH_MASK)
330 #define SKX_IIO_PMON_RAW_EVENT_MASK_EXT (SKX_PMON_CTL_TRESH_MASK_EXT | \
331                                          SKX_PMON_CTL_CH_MASK | \
332                                          SKX_PMON_CTL_FC_MASK)
333
334 /* SKX IRP */
335 #define SKX_IRP0_MSR_PMON_CTL0          0xa5b
336 #define SKX_IRP0_MSR_PMON_CTR0          0xa59
337 #define SKX_IRP0_MSR_PMON_BOX_CTL       0xa58
338 #define SKX_IRP_MSR_OFFSET              0x20
339
340 /* SKX UPI */
341 #define SKX_UPI_PCI_PMON_CTL0           0x350
342 #define SKX_UPI_PCI_PMON_CTR0           0x318
343 #define SKX_UPI_PCI_PMON_BOX_CTL        0x378
344 #define SKX_UPI_CTL_UMASK_EXT           0xffefff
345
346 /* SKX M2M */
347 #define SKX_M2M_PCI_PMON_CTL0           0x228
348 #define SKX_M2M_PCI_PMON_CTR0           0x200
349 #define SKX_M2M_PCI_PMON_BOX_CTL        0x258
350
351 /* SNR Ubox */
352 #define SNR_U_MSR_PMON_CTR0                     0x1f98
353 #define SNR_U_MSR_PMON_CTL0                     0x1f91
354 #define SNR_U_MSR_PMON_UCLK_FIXED_CTL           0x1f93
355 #define SNR_U_MSR_PMON_UCLK_FIXED_CTR           0x1f94
356
357 /* SNR CHA */
358 #define SNR_CHA_RAW_EVENT_MASK_EXT              0x3ffffff
359 #define SNR_CHA_MSR_PMON_CTL0                   0x1c01
360 #define SNR_CHA_MSR_PMON_CTR0                   0x1c08
361 #define SNR_CHA_MSR_PMON_BOX_CTL                0x1c00
362 #define SNR_C0_MSR_PMON_BOX_FILTER0             0x1c05
363
364
365 /* SNR IIO */
366 #define SNR_IIO_MSR_PMON_CTL0                   0x1e08
367 #define SNR_IIO_MSR_PMON_CTR0                   0x1e01
368 #define SNR_IIO_MSR_PMON_BOX_CTL                0x1e00
369 #define SNR_IIO_MSR_OFFSET                      0x10
370 #define SNR_IIO_PMON_RAW_EVENT_MASK_EXT         0x7ffff
371
372 /* SNR IRP */
373 #define SNR_IRP0_MSR_PMON_CTL0                  0x1ea8
374 #define SNR_IRP0_MSR_PMON_CTR0                  0x1ea1
375 #define SNR_IRP0_MSR_PMON_BOX_CTL               0x1ea0
376 #define SNR_IRP_MSR_OFFSET                      0x10
377
378 /* SNR M2PCIE */
379 #define SNR_M2PCIE_MSR_PMON_CTL0                0x1e58
380 #define SNR_M2PCIE_MSR_PMON_CTR0                0x1e51
381 #define SNR_M2PCIE_MSR_PMON_BOX_CTL             0x1e50
382 #define SNR_M2PCIE_MSR_OFFSET                   0x10
383
384 /* SNR PCU */
385 #define SNR_PCU_MSR_PMON_CTL0                   0x1ef1
386 #define SNR_PCU_MSR_PMON_CTR0                   0x1ef8
387 #define SNR_PCU_MSR_PMON_BOX_CTL                0x1ef0
388 #define SNR_PCU_MSR_PMON_BOX_FILTER             0x1efc
389
390 /* SNR M2M */
391 #define SNR_M2M_PCI_PMON_CTL0                   0x468
392 #define SNR_M2M_PCI_PMON_CTR0                   0x440
393 #define SNR_M2M_PCI_PMON_BOX_CTL                0x438
394 #define SNR_M2M_PCI_PMON_UMASK_EXT              0xff
395
396 /* SNR PCIE3 */
397 #define SNR_PCIE3_PCI_PMON_CTL0                 0x508
398 #define SNR_PCIE3_PCI_PMON_CTR0                 0x4e8
399 #define SNR_PCIE3_PCI_PMON_BOX_CTL              0x4e0
400
401 /* SNR IMC */
402 #define SNR_IMC_MMIO_PMON_FIXED_CTL             0x54
403 #define SNR_IMC_MMIO_PMON_FIXED_CTR             0x38
404 #define SNR_IMC_MMIO_PMON_CTL0                  0x40
405 #define SNR_IMC_MMIO_PMON_CTR0                  0x8
406 #define SNR_IMC_MMIO_PMON_BOX_CTL               0x22800
407 #define SNR_IMC_MMIO_OFFSET                     0x4000
408 #define SNR_IMC_MMIO_SIZE                       0x4000
409 #define SNR_IMC_MMIO_BASE_OFFSET                0xd0
410 #define SNR_IMC_MMIO_BASE_MASK                  0x1FFFFFFF
411 #define SNR_IMC_MMIO_MEM0_OFFSET                0xd8
412 #define SNR_IMC_MMIO_MEM0_MASK                  0x7FF
413
414 /* ICX CHA */
415 #define ICX_C34_MSR_PMON_CTR0                   0xb68
416 #define ICX_C34_MSR_PMON_CTL0                   0xb61
417 #define ICX_C34_MSR_PMON_BOX_CTL                0xb60
418 #define ICX_C34_MSR_PMON_BOX_FILTER0            0xb65
419
420 /* ICX IIO */
421 #define ICX_IIO_MSR_PMON_CTL0                   0xa58
422 #define ICX_IIO_MSR_PMON_CTR0                   0xa51
423 #define ICX_IIO_MSR_PMON_BOX_CTL                0xa50
424
425 /* ICX IRP */
426 #define ICX_IRP0_MSR_PMON_CTL0                  0xa4d
427 #define ICX_IRP0_MSR_PMON_CTR0                  0xa4b
428 #define ICX_IRP0_MSR_PMON_BOX_CTL               0xa4a
429
430 /* ICX M2PCIE */
431 #define ICX_M2PCIE_MSR_PMON_CTL0                0xa46
432 #define ICX_M2PCIE_MSR_PMON_CTR0                0xa41
433 #define ICX_M2PCIE_MSR_PMON_BOX_CTL             0xa40
434
435 /* ICX UPI */
436 #define ICX_UPI_PCI_PMON_CTL0                   0x350
437 #define ICX_UPI_PCI_PMON_CTR0                   0x320
438 #define ICX_UPI_PCI_PMON_BOX_CTL                0x318
439 #define ICX_UPI_CTL_UMASK_EXT                   0xffffff
440
441 /* ICX M3UPI*/
442 #define ICX_M3UPI_PCI_PMON_CTL0                 0xd8
443 #define ICX_M3UPI_PCI_PMON_CTR0                 0xa8
444 #define ICX_M3UPI_PCI_PMON_BOX_CTL              0xa0
445
446 /* ICX IMC */
447 #define ICX_NUMBER_IMC_CHN                      2
448 #define ICX_IMC_MEM_STRIDE                      0x4
449
450 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
451 DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
452 DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
453 DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
454 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
455 DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
456 DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
457 DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
458 DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
459 DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
460 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
461 DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
462 DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
463 DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
464 DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
465 DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
466 DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
467 DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
468 DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
469 DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
470 DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
471 DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
472 DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
473 DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
474 DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
475 DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
476 DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
477 DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
478 DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
479 DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
480 DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
481 DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
482 DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
483 DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
484 DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
485 DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
486 DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
487 DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
488 DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
489 DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
490 DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
491 DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
492 DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
493 DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
494 DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
495 DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
496 DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
497 DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
498 DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
499 DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
500 DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
501 DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
502 DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
503 DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
504 DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
505 DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
506 DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
507 DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
508 DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
509 DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
510 DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
511 DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
512 DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
513 DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
514 DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
515 DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
516 DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
517 DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
518 DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
519 DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
520 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
521 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
522 DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
523 DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
524 DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
525 DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
526 DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
527 DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
528
529 static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
530 {
531         struct pci_dev *pdev = box->pci_dev;
532         int box_ctl = uncore_pci_box_ctl(box);
533         u32 config = 0;
534
535         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
536                 config |= SNBEP_PMON_BOX_CTL_FRZ;
537                 pci_write_config_dword(pdev, box_ctl, config);
538         }
539 }
540
541 static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
542 {
543         struct pci_dev *pdev = box->pci_dev;
544         int box_ctl = uncore_pci_box_ctl(box);
545         u32 config = 0;
546
547         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
548                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
549                 pci_write_config_dword(pdev, box_ctl, config);
550         }
551 }
552
553 static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
554 {
555         struct pci_dev *pdev = box->pci_dev;
556         struct hw_perf_event *hwc = &event->hw;
557
558         pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
559 }
560
561 static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
562 {
563         struct pci_dev *pdev = box->pci_dev;
564         struct hw_perf_event *hwc = &event->hw;
565
566         pci_write_config_dword(pdev, hwc->config_base, hwc->config);
567 }
568
569 static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
570 {
571         struct pci_dev *pdev = box->pci_dev;
572         struct hw_perf_event *hwc = &event->hw;
573         u64 count = 0;
574
575         pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
576         pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
577
578         return count;
579 }
580
581 static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
582 {
583         struct pci_dev *pdev = box->pci_dev;
584         int box_ctl = uncore_pci_box_ctl(box);
585
586         pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
587 }
588
589 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
590 {
591         u64 config;
592         unsigned msr;
593
594         msr = uncore_msr_box_ctl(box);
595         if (msr) {
596                 rdmsrl(msr, config);
597                 config |= SNBEP_PMON_BOX_CTL_FRZ;
598                 wrmsrl(msr, config);
599         }
600 }
601
602 static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
603 {
604         u64 config;
605         unsigned msr;
606
607         msr = uncore_msr_box_ctl(box);
608         if (msr) {
609                 rdmsrl(msr, config);
610                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
611                 wrmsrl(msr, config);
612         }
613 }
614
615 static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
616 {
617         struct hw_perf_event *hwc = &event->hw;
618         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
619
620         if (reg1->idx != EXTRA_REG_NONE)
621                 wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
622
623         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
624 }
625
626 static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
627                                         struct perf_event *event)
628 {
629         struct hw_perf_event *hwc = &event->hw;
630
631         wrmsrl(hwc->config_base, hwc->config);
632 }
633
634 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
635 {
636         unsigned msr = uncore_msr_box_ctl(box);
637
638         if (msr)
639                 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
640 }
641
/* Generic SNB-EP PMON format attributes (most boxes). */
static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
650
/* SNB-EP Ubox format attributes (5-bit threshold field). */
static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};
659
/* SNB-EP Cbox format attributes, including the box filter fields. */
static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};
673
/* PCU format attributes: occupancy select/edge/invert plus band filters. */
static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};
688
/*
 * QPI format attributes: extended event select plus the packet
 * match/mask filter fields (programmed via config1/config2).
 */
static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
715
/*
 * Named IMC events.  The cas_count scale 6.103515625e-5 equals 64 / 2^20 —
 * presumably one 64-byte cache line per CAS, reported in MiB (see .unit).
 */
static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
726
/* Named QPI events exposed via sysfs. */
static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};
734
/* sysfs "format" attribute groups — one per box flavour. */
static const struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static const struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static const struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static const struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static const struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};
759
/*
 * Common MSR-based box ops.  The __ variant leaves init_box unset so a
 * caller needing a different init routine can reuse the rest.
 */
#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),			\
	.init_box	= snbep_uncore_msr_init_box		\

/* Default MSR ops: common ops plus the standard init_box. */
static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};
774
/*
 * Common PCI box ops.  enable_event is intentionally not included so
 * each user (generic PCI ops, QPI ops) can supply its own.
 */
#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()			\
	.init_box	= snbep_uncore_pci_init_box,		\
	.disable_box	= snbep_uncore_pci_disable_box,		\
	.enable_box	= snbep_uncore_pci_enable_box,		\
	.disable_event	= snbep_uncore_pci_disable_event,	\
	.read_counter	= snbep_uncore_pci_read_counter
781
782 static struct intel_uncore_ops snbep_uncore_pci_ops = {
783         SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
784         .enable_event   = snbep_uncore_pci_enable_event,        \
785 };
786
/* C-box events restricted to a subset of the counters (event, counter mask). */
static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};
816
/* R2PCIe events restricted to a subset of the counters. */
static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};
830
/* R3QPI events restricted to a subset of the counters. */
static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
862
/*
 * UBOX: two 44-bit general counters plus a 48-bit fixed UCLK counter,
 * all accessed via MSRs (no box-level control register).
 */
static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};
877
/*
 * Map event+umask encodings to the filter fields they require; the third
 * argument is a bitmask of those fields, OR-ed into reg1->idx by
 * snbep_cbox_hw_config().
 */
static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};
906
907 static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
908 {
909         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
910         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
911         int i;
912
913         if (uncore_box_is_fake(box))
914                 return;
915
916         for (i = 0; i < 5; i++) {
917                 if (reg1->alloc & (0x1 << i))
918                         atomic_sub(1 << (i * 6), &er->ref);
919         }
920         reg1->alloc = 0;
921 }
922
/*
 * Try to claim the shared C-box filter-register fields @event needs.
 *
 * reg1->idx is a bitmask of up to five filter fields; er->ref packs a
 * 6-bit reference count per field.  A field may be shared only when the
 * requested value matches the currently programmed one under that
 * field's mask (from @cbox_filter_mask).  Returns NULL on success, or
 * the empty constraint when a field is held with a conflicting value.
 */
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		/* A real (non-fake) event keeps fields it already holds. */
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		/* Field is free, or busy with an identical value: share it. */
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	/* Conflict: roll back only the references taken in this call. */
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}
969
970 static u64 snbep_cbox_filter_mask(int fields)
971 {
972         u64 mask = 0;
973
974         if (fields & 0x1)
975                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
976         if (fields & 0x2)
977                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
978         if (fields & 0x4)
979                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
980         if (fields & 0x8)
981                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
982
983         return mask;
984 }
985
986 static struct event_constraint *
987 snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
988 {
989         return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
990 }
991
992 static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
993 {
994         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
995         struct extra_reg *er;
996         int idx = 0;
997
998         for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
999                 if (er->event != (event->hw.config & er->config_mask))
1000                         continue;
1001                 idx |= er->idx;
1002         }
1003
1004         if (idx) {
1005                 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1006                         SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1007                 reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
1008                 reg1->idx = idx;
1009         }
1010         return 0;
1011 }
1012
/* C-box ops: common MSR ops plus the shared-filter management hooks. */
static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
1019
/*
 * C-box (LLC slice): up to 8 boxes of four 44-bit counters with one
 * shared filter register per box (hence num_shared_regs = 1).
 */
static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};
1035
1036 static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
1037 {
1038         struct hw_perf_event *hwc = &event->hw;
1039         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1040         u64 config = reg1->config;
1041
1042         if (new_idx > reg1->idx)
1043                 config <<= 8 * (new_idx - reg1->idx);
1044         else
1045                 config >>= 8 * (reg1->idx - new_idx);
1046
1047         if (modify) {
1048                 hwc->config += new_idx - reg1->idx;
1049                 reg1->config = config;
1050                 reg1->idx = new_idx;
1051         }
1052         return config;
1053 }
1054
/*
 * Claim one byte slot of the shared PCU filter register.
 *
 * er->ref packs an 8-bit reference count per slot.  If the preferred
 * slot (reg1->idx) is busy with a different filter value, try the other
 * slots in turn, rotating the value into place via snbep_pcu_alter_er().
 * Returns NULL on success or the empty constraint if all four conflict.
 */
static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	/* Slot is free, or already holds an identical value: share it. */
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		/* Rotate to the next of the four slots and retry. */
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	/* Commit the (possibly moved) slot on real events only. */
	if (!uncore_box_is_fake(box)) {
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}
1096
1097 static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
1098 {
1099         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1100         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
1101
1102         if (uncore_box_is_fake(box) || !reg1->alloc)
1103                 return;
1104
1105         atomic_sub(1 << (reg1->idx * 8), &er->ref);
1106         reg1->alloc = 0;
1107 }
1108
1109 static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1110 {
1111         struct hw_perf_event *hwc = &event->hw;
1112         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1113         int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
1114
1115         if (ev_sel >= 0xb && ev_sel <= 0xe) {
1116                 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
1117                 reg1->idx = ev_sel - 0xb;
1118                 reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
1119         }
1120         return 0;
1121 }
1122
/* PCU ops: common MSR ops plus the shared-filter-slot management hooks. */
static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
1129
/* PCU (power control unit): four 48-bit counters, one shared filter reg. */
static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};
1143
/* NULL-terminated list of the MSR-based uncore PMU types. */
static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};
1150
1151 void snbep_uncore_cpu_init(void)
1152 {
1153         if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1154                 snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1155         uncore_msr_uncores = snbep_msr_uncores;
1156 }
1157
/* Indices into uncore_extra_pci_dev[].dev[] for the QPI filter devices. */
enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	BDX_PCI_QPI_PORT2_FILTER,
};
1163
1164 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1165 {
1166         struct hw_perf_event *hwc = &event->hw;
1167         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1168         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1169
1170         if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
1171                 reg1->idx = 0;
1172                 reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
1173                 reg1->config = event->attr.config1;
1174                 reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
1175                 reg2->config = event->attr.config2;
1176         }
1177         return 0;
1178 }
1179
1180 static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1181 {
1182         struct pci_dev *pdev = box->pci_dev;
1183         struct hw_perf_event *hwc = &event->hw;
1184         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1185         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1186
1187         if (reg1->idx != EXTRA_REG_NONE) {
1188                 int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
1189                 int die = box->dieid;
1190                 struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];
1191
1192                 if (filter_pdev) {
1193                         pci_write_config_dword(filter_pdev, reg1->reg,
1194                                                 (u32)reg1->config);
1195                         pci_write_config_dword(filter_pdev, reg1->reg + 4,
1196                                                 (u32)(reg1->config >> 32));
1197                         pci_write_config_dword(filter_pdev, reg2->reg,
1198                                                 (u32)reg2->config);
1199                         pci_write_config_dword(filter_pdev, reg2->reg + 4,
1200                                                 (u32)(reg2->config >> 32));
1201                 }
1202         }
1203
1204         pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1205 }
1206
/* QPI ops: common PCI ops plus packet match/mask filter handling. */
static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};
1214
/* Shared register layout/ops for the plain PCI-based box types below. */
#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group
1222
/* HA (home agent): one box of four 48-bit counters on PCI. */
static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1230
/* IMC (memory controller): 4 channels, each with a fixed DCLK counter. */
static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1242
/*
 * QPI: two ports; uses its own ops/format group for the packet
 * match/mask filters, hence no SNBEP_UNCORE_PCI_COMMON_INIT() here.
 */
static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};
1257
1258
/* R2PCIe ring-to-PCIe interface: one box of four 44-bit counters. */
static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1267
/* R3QPI ring-to-QPI interface: two links of three 44-bit counters. */
static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1276
/* Indices into snbep_pci_uncores[], referenced by the PCI id table. */
enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};
1284
/* NULL-terminated list of the PCI-based uncore PMU types. */
static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};
1293
/*
 * PCI ids of the SNB-EP uncore devices; driver_data packs the uncore
 * type index and box/extra-device index.
 */
static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
1347
/*
 * PCI driver skeleton; presumably probe/remove are filled in by the
 * shared uncore PCI code — they are not set here.
 */
static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};
1352
/* Low 3 bits of the CPUNODEID config word select the local node. */
#define NODE_ID_MASK	0x7

/*
 * Build the PCI bus number -> die id mapping, from the UBOX node-id
 * registers when nr_node_ids <= 8 and from NUMA topology otherwise.
 */
1358 static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
1359 {
1360         struct pci_dev *ubox_dev = NULL;
1361         int i, bus, nodeid, segment, die_id;
1362         struct pci2phy_map *map;
1363         int err = 0;
1364         u32 config = 0;
1365
1366         while (1) {
1367                 /* find the UBOX device */
1368                 ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
1369                 if (!ubox_dev)
1370                         break;
1371                 bus = ubox_dev->bus->number;
1372                 /*
1373                  * The nodeid and idmap registers only contain enough
1374                  * information to handle 8 nodes.  On systems with more
1375                  * than 8 nodes, we need to rely on NUMA information,
1376                  * filled in from BIOS supplied information, to determine
1377                  * the topology.
1378                  */
1379                 if (nr_node_ids <= 8) {
1380                         /* get the Node ID of the local register */
1381                         err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
1382                         if (err)
1383                                 break;
1384                         nodeid = config & NODE_ID_MASK;
1385                         /* get the Node ID mapping */
1386                         err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
1387                         if (err)
1388                                 break;
1389
1390                         segment = pci_domain_nr(ubox_dev->bus);
1391                         raw_spin_lock(&pci2phy_map_lock);
1392                         map = __find_pci2phy_map(segment);
1393                         if (!map) {
1394                                 raw_spin_unlock(&pci2phy_map_lock);
1395                                 err = -ENOMEM;
1396                                 break;
1397                         }
1398
1399                         /*
1400                          * every three bits in the Node ID mapping register maps
1401                          * to a particular node.
1402                          */
1403                         for (i = 0; i < 8; i++) {
1404                                 if (nodeid == ((config >> (3 * i)) & 0x7)) {
1405                                         if (topology_max_die_per_package() > 1)
1406                                                 die_id = i;
1407                                         else
1408                                                 die_id = topology_phys_to_logical_pkg(i);
1409                                         if (die_id < 0)
1410                                                 die_id = -ENODEV;
1411                                         map->pbus_to_dieid[bus] = die_id;
1412                                         break;
1413                                 }
1414                         }
1415                         raw_spin_unlock(&pci2phy_map_lock);
1416                 } else {
1417                         int node = pcibus_to_node(ubox_dev->bus);
1418                         int cpu;
1419
1420                         segment = pci_domain_nr(ubox_dev->bus);
1421                         raw_spin_lock(&pci2phy_map_lock);
1422                         map = __find_pci2phy_map(segment);
1423                         if (!map) {
1424                                 raw_spin_unlock(&pci2phy_map_lock);
1425                                 err = -ENOMEM;
1426                                 break;
1427                         }
1428
1429                         die_id = -1;
1430                         for_each_cpu(cpu, cpumask_of_pcibus(ubox_dev->bus)) {
1431                                 struct cpuinfo_x86 *c = &cpu_data(cpu);
1432
1433                                 if (c->initialized && cpu_to_node(cpu) == node) {
1434                                         map->pbus_to_dieid[bus] = die_id = c->logical_die_id;
1435                                         break;
1436                                 }
1437                         }
1438                         raw_spin_unlock(&pci2phy_map_lock);
1439
1440                         if (WARN_ON_ONCE(die_id == -1)) {
1441                                 err = -EINVAL;
1442                                 break;
1443                         }
1444                 }
1445         }
1446
1447         if (!err) {
1448                 /*
1449                  * For PCI bus with no UBOX device, find the next bus
1450                  * that has UBOX device and use its mapping.
1451                  */
1452                 raw_spin_lock(&pci2phy_map_lock);
1453                 list_for_each_entry(map, &pci2phy_map_head, list) {
1454                         i = -1;
1455                         if (reverse) {
1456                                 for (bus = 255; bus >= 0; bus--) {
1457                                         if (map->pbus_to_dieid[bus] != -1)
1458                                                 i = map->pbus_to_dieid[bus];
1459                                         else
1460                                                 map->pbus_to_dieid[bus] = i;
1461                                 }
1462                         } else {
1463                                 for (bus = 0; bus <= 255; bus++) {
1464                                         if (map->pbus_to_dieid[bus] != -1)
1465                                                 i = map->pbus_to_dieid[bus];
1466                                         else
1467                                                 map->pbus_to_dieid[bus] = i;
1468                                 }
1469                         }
1470                 }
1471                 raw_spin_unlock(&pci2phy_map_lock);
1472         }
1473
1474         pci_dev_put(ubox_dev);
1475
1476         return err ? pcibios_err_to_errno(err) : 0;
1477 }
1478
1479 int snbep_uncore_pci_init(void)
1480 {
1481         int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1482         if (ret)
1483                 return ret;
1484         uncore_pci_uncores = snbep_pci_uncores;
1485         uncore_pci_driver = &snbep_uncore_pci_driver;
1486         return 0;
1487 }
1488 /* end of Sandy Bridge-EP uncore support */
1489
1490 /* IvyTown uncore support */
1491 static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
1492 {
1493         unsigned msr = uncore_msr_box_ctl(box);
1494         if (msr)
1495                 wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
1496 }
1497
1498 static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
1499 {
1500         struct pci_dev *pdev = box->pci_dev;
1501
1502         pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
1503 }
1504
/*
 * Ops shared by the IvyTown MSR-based PMON boxes.  All callbacks are the
 * SandyBridge-EP ones except init_box, which must write the IvyTown
 * box-control init value.
 */
#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= ivbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};
1516
/* Ops for IvyTown PCI-based PMON boxes; only init_box is IVB-specific. */
static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
1525
/*
 * Common initializer for IvyTown PCI uncore types: SNB-EP register layout
 * with the IvyTown raw event mask, ops and format group.
 */
#define IVBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,			\
	.format_group	= &ivbep_uncore_format_group
1533
/* Generic IvyTown event format: ev_sel/umask/edge/inv + 8-bit threshold. */
static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

/* Ubox format: as above but the threshold field is only 5 bits wide. */
static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

/* C-box format: adds tid_en and the box filter fields (config1). */
static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

/* PCU format: occupancy select/invert/edge plus four band filters. */
static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

/* QPI format: extended event select plus match/mask register fields. */
static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
1608
/* sysfs "format" groups exposing the attribute arrays above. */
static const struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static const struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static const struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static const struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static const struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};
1633
/* IvyTown Ubox: 2 general-purpose counters plus a fixed UCLK counter. */
static struct intel_uncore_type ivbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_ubox_format_group,
};
1648
/*
 * Map C-box event encodings to the filter-register fields they require.
 * The idx value of each matching entry is OR-ed into the field set used
 * by ivbep_cbox_filter_mask() / ivbep_cbox_hw_config().  The values are
 * hardware event encodings; do not reorder or "clean up".
 */
static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	EVENT_EXTRA_END
};
1689
1690 static u64 ivbep_cbox_filter_mask(int fields)
1691 {
1692         u64 mask = 0;
1693
1694         if (fields & 0x1)
1695                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1696         if (fields & 0x2)
1697                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1698         if (fields & 0x4)
1699                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1700         if (fields & 0x8)
1701                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1702         if (fields & 0x10) {
1703                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1704                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1705                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1706                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1707         }
1708
1709         return mask;
1710 }
1711
/* C-box constraint lookup using the IvyTown filter-mask translation. */
static struct event_constraint *
ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
}
1717
1718 static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1719 {
1720         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1721         struct extra_reg *er;
1722         int idx = 0;
1723
1724         for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
1725                 if (er->event != (event->hw.config & er->config_mask))
1726                         continue;
1727                 idx |= er->idx;
1728         }
1729
1730         if (idx) {
1731                 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1732                         SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1733                 reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
1734                 reg1->idx = idx;
1735         }
1736         return 0;
1737 }
1738
/*
 * Enable a C-box event.  The box filter MSR must be programmed before the
 * event control enable bit is set, so the write order here matters.
 */
static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		/*
		 * The 64-bit filter value is split across two 32-bit MSRs;
		 * the upper half lives 6 MSRs above the lower half.
		 */
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 6, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1752
/* C-box ops: IVB-specific enable/filter handling, SNB-EP for the rest. */
static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= ivbep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= ivbep_cbox_hw_config,
	.get_constraint		= ivbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/*
 * IvyTown C-box: up to 15 boxes (clamped to the actual core count in
 * ivbep_uncore_cpu_init()); one shared reg backs the filter MSR.
 */
static struct intel_uncore_type ivbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 15,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &ivbep_uncore_cbox_ops,
	.format_group		= &ivbep_uncore_cbox_format_group,
};
1780
/* PCU ops: common MSR ops plus SNB-EP filter/constraint handling. */
static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* IvyTown power control unit PMON. */
static struct intel_uncore_type ivbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_pcu_ops,
	.format_group		= &ivbep_uncore_pcu_format_group,
};

/* All IvyTown MSR-based uncore types; NULL-terminated. */
static struct intel_uncore_type *ivbep_msr_uncores[] = {
	&ivbep_uncore_ubox,
	&ivbep_uncore_cbox,
	&ivbep_uncore_pcu,
	NULL,
};
1808
1809 void ivbep_uncore_cpu_init(void)
1810 {
1811         if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1812                 ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1813         uncore_msr_uncores = ivbep_msr_uncores;
1814 }
1815
/* IvyTown home agent PMON (2 boxes). */
static struct intel_uncore_type ivbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* IvyTown memory controller PMON: 8 channels, fixed DCLK counter each. */
static struct intel_uncore_type ivbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/*
 * Registers in IRP boxes are not properly aligned, so the per-counter
 * control and counter offsets are looked up via these tables instead of
 * being computed from a base + stride.
 */
static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1839
1840 static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1841 {
1842         struct pci_dev *pdev = box->pci_dev;
1843         struct hw_perf_event *hwc = &event->hw;
1844
1845         pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
1846                                hwc->config | SNBEP_PMON_CTL_EN);
1847 }
1848
1849 static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1850 {
1851         struct pci_dev *pdev = box->pci_dev;
1852         struct hw_perf_event *hwc = &event->hw;
1853
1854         pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
1855 }
1856
/*
 * Read a 64-bit IRP counter as two 32-bit PCI config reads: low dword
 * first, then high.  The (u32 *)&count punning assembles the halves in
 * host byte order, which is fine here since this code is x86-only
 * (little-endian).
 */
static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);

	return count;
}
1868
/* IRP ops: event accessors use the lookup tables above. */
static struct intel_uncore_ops ivbep_uncore_irp_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= ivbep_uncore_irp_read_counter,
};

/* IvyTown IRP (IIO ring port) PMON. */
static struct intel_uncore_type ivbep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= IVBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &ivbep_uncore_irp_ops,
	.format_group		= &ivbep_uncore_format_group,
};
1888
/* QPI ops: SNB-EP handling plus match/mask config via hw_config. */
static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_qpi_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
	.hw_config	= snbep_qpi_hw_config,
	.get_constraint	= uncore_get_constraint,
	.put_constraint	= uncore_put_constraint,
};

/* IvyTown QPI link layer PMON (3 ports). */
static struct intel_uncore_type ivbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_qpi_ops,
	.format_group		= &ivbep_uncore_qpi_format_group,
};
1914
/* IvyTown R2PCIe (ring-to-PCIe) PMON. */
static struct intel_uncore_type ivbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* IvyTown R3QPI (ring-to-QPI) PMON (2 boxes, 3 counters each). */
static struct intel_uncore_type ivbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1932
/* Indices into ivbep_pci_uncores[], referenced by the PCI ID table. */
enum {
	IVBEP_PCI_UNCORE_HA,
	IVBEP_PCI_UNCORE_IMC,
	IVBEP_PCI_UNCORE_IRP,
	IVBEP_PCI_UNCORE_QPI,
	IVBEP_PCI_UNCORE_R2PCIE,
	IVBEP_PCI_UNCORE_R3QPI,
};

/* All IvyTown PCI-based uncore types; NULL-terminated. */
static struct intel_uncore_type *ivbep_pci_uncores[] = {
	[IVBEP_PCI_UNCORE_HA]	= &ivbep_uncore_ha,
	[IVBEP_PCI_UNCORE_IMC]	= &ivbep_uncore_imc,
	[IVBEP_PCI_UNCORE_IRP]	= &ivbep_uncore_irp,
	[IVBEP_PCI_UNCORE_QPI]	= &ivbep_uncore_qpi,
	[IVBEP_PCI_UNCORE_R2PCIE]	= &ivbep_uncore_r2pcie,
	[IVBEP_PCI_UNCORE_R3QPI]	= &ivbep_uncore_r3qpi,
	NULL,
};
1951
/*
 * PCI device IDs for the IvyTown uncore units; driver_data encodes the
 * uncore type index and box (or extra-device) index for each device.
 */
static const struct pci_device_id ivbep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
2037
/* PCI driver stub; the uncore core layer does the actual probing. */
static struct pci_driver ivbep_uncore_pci_driver = {
	.name		= "ivbep_uncore",
	.id_table	= ivbep_uncore_pci_ids,
};
2042
2043 int ivbep_uncore_pci_init(void)
2044 {
2045         int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
2046         if (ret)
2047                 return ret;
2048         uncore_pci_uncores = ivbep_pci_uncores;
2049         uncore_pci_driver = &ivbep_uncore_pci_driver;
2050         return 0;
2051 }
2052 /* end of IvyTown uncore support */
2053
2054 /* KNL uncore support */
/* KNL Ubox event format: adds tid_en over the generic SNB-EP layout. */
static struct attribute *knl_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static const struct attribute_group knl_uncore_ubox_format_group = {
	.name = "format",
	.attrs = knl_uncore_ubox_formats_attr,
};

/* Knights Landing Ubox: HSW-EP register layout, KNL event mask. */
static struct intel_uncore_type knl_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= KNL_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_ubox_format_group,
};
2084
/* KNL CHA event format: qor plus tid/link/state/opcode filter fields. */
static struct attribute *knl_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_link3.attr,
	&format_attr_filter_state4.attr,
	&format_attr_filter_local.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_nnm.attr,
	&format_attr_filter_opc3.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group knl_uncore_cha_format_group = {
	.name = "format",
	.attrs = knl_uncore_cha_formats_attr,
};

/* Events 0x11, 0x1f and 0x36 may only run on counter 0. */
static struct event_constraint knl_uncore_cha_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};

/*
 * Map CHA event encodings to the filter fields they require; consumed by
 * knl_cha_filter_mask() / knl_cha_hw_config().
 */
static struct extra_reg knl_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
	EVENT_EXTRA_END
};
2125
2126 static u64 knl_cha_filter_mask(int fields)
2127 {
2128         u64 mask = 0;
2129
2130         if (fields & 0x1)
2131                 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
2132         if (fields & 0x2)
2133                 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
2134         if (fields & 0x4)
2135                 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
2136         return mask;
2137 }
2138
/* CHA constraint lookup using the KNL filter-mask translation. */
static struct event_constraint *
knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
}
2144
/*
 * Configure the extra (filter) register for a KNL CHA event: collect the
 * filter fields required by the event from the extra-register table, then
 * record the per-box filter MSR address and masked config1 value.  The
 * remote/local node and NNC (near-memory non-cacheable) filter bits are
 * always forced on so counting is not silently restricted by node scope.
 * Always succeeds.
 */
static int knl_cha_hw_config(struct intel_uncore_box *box,
			     struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);

		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
		reg1->idx = idx;
	}
	return 0;
}
2170
/* Defined in the HSW-EP section below; KNL CHA reuses it. */
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				    struct perf_event *event);

static struct intel_uncore_ops knl_uncore_cha_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= knl_cha_hw_config,
	.get_constraint		= knl_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
2185
/* KNL CHA PMU: 38 boxes, 4 generic 48-bit counters, one shared filter reg. */
static struct intel_uncore_type knl_uncore_cha = {
	.name			= "cha",
	.num_counters		= 4,
	.num_boxes		= 38,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= KNL_CHA_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= knl_uncore_cha_constraints,
	.ops			= &knl_uncore_cha_ops,
	.format_group		= &knl_uncore_cha_format_group,
};
2201
/* sysfs format attributes for the KNL PCU PMU (occupancy-capable events). */
static struct attribute *knl_uncore_pcu_formats_attr[] = {
	&format_attr_event2.attr,
	&format_attr_use_occ_ctr.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh6.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	NULL,
};

static const struct attribute_group knl_uncore_pcu_format_group = {
	.name = "format",
	.attrs = knl_uncore_pcu_formats_attr,
};
2219
/* KNL power control unit PMU: single MSR-based box with 4 48-bit counters. */
static struct intel_uncore_type knl_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_pcu_format_group,
};
2232
/* All MSR-based KNL uncore PMU types; NULL-terminated. */
static struct intel_uncore_type *knl_msr_uncores[] = {
	&knl_uncore_ubox,
	&knl_uncore_cha,
	&knl_uncore_pcu,
	NULL,
};
2239
/* Hook the MSR-based KNL uncore types into the common uncore driver. */
void knl_uncore_cpu_init(void)
{
	uncore_msr_uncores = knl_msr_uncores;
}
2244
/*
 * Enable the box by writing 0 to its PCI box-control register, i.e. with
 * no freeze/reset bits set the counters are free to run.
 */
static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, 0);
}
2252
2253 static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
2254                                         struct perf_event *event)
2255 {
2256         struct pci_dev *pdev = box->pci_dev;
2257         struct hw_perf_event *hwc = &event->hw;
2258
2259         if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
2260                                                         == UNCORE_FIXED_EVENT)
2261                 pci_write_config_dword(pdev, hwc->config_base,
2262                                        hwc->config | KNL_PMON_FIXED_CTL_EN);
2263         else
2264                 pci_write_config_dword(pdev, hwc->config_base,
2265                                        hwc->config | SNBEP_PMON_CTL_EN);
2266 }
2267
/* PCI ops shared by the KNL IMC and EDC PMUs (KNL-specific enable paths). */
static struct intel_uncore_ops knl_uncore_imc_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= knl_uncore_imc_enable_box,
	.read_counter	= snbep_uncore_pci_read_counter,
	.enable_event	= knl_uncore_imc_enable_event,
	.disable_event	= snbep_uncore_pci_disable_event,
};
2276
/* KNL memory-controller UCLK PMU: 2 boxes, plus a 48-bit fixed counter. */
static struct intel_uncore_type knl_uncore_imc_uclk = {
	.name			= "imc_uclk",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2292
/* KNL per-channel IMC DCLK PMU: 6 boxes (see the PCI ID table: 2 MCs x 3 ch). */
static struct intel_uncore_type knl_uncore_imc_dclk = {
	.name			= "imc",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_MC0_CH0_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
	.fixed_ctl		= KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
	.box_ctl		= KNL_MC0_CH0_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2308
/* KNL EDC UCLK PMU: 8 boxes, same register layout as imc_uclk. */
static struct intel_uncore_type knl_uncore_edc_uclk = {
	.name			= "edc_uclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2324
/* KNL EDC ECLK PMU: 8 boxes, with its own fixed counter/control registers. */
static struct intel_uncore_type knl_uncore_edc_eclk = {
	.name			= "edc_eclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_EDC0_ECLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
	.fixed_ctl		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
	.box_ctl		= KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2340
/* Counter mask 0x3: event 0x23 is restricted to counters 0-1. */
static struct event_constraint knl_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};

/* KNL M2PCIe PMU: single PCI box using the common SNB-EP PCI layout. */
static struct intel_uncore_type knl_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= knl_uncore_m2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2354
/* sysfs format attributes for the KNL IRP PMU. */
static struct attribute *knl_uncore_irp_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group knl_uncore_irp_format_group = {
	.name = "format",
	.attrs = knl_uncore_irp_formats_attr,
};
2369
/* KNL IRP PMU: single PCI box with 2 generic 48-bit counters. */
static struct intel_uncore_type knl_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= KNL_IRP_PCI_PMON_BOX_CTL,
	.ops			= &snbep_uncore_pci_ops,
	.format_group		= &knl_uncore_irp_format_group,
};
2382
/* Indices into knl_pci_uncores[], referenced by the PCI ID table below. */
enum {
	KNL_PCI_UNCORE_MC_UCLK,
	KNL_PCI_UNCORE_MC_DCLK,
	KNL_PCI_UNCORE_EDC_UCLK,
	KNL_PCI_UNCORE_EDC_ECLK,
	KNL_PCI_UNCORE_M2PCIE,
	KNL_PCI_UNCORE_IRP,
};

static struct intel_uncore_type *knl_pci_uncores[] = {
	[KNL_PCI_UNCORE_MC_UCLK]	= &knl_uncore_imc_uclk,
	[KNL_PCI_UNCORE_MC_DCLK]	= &knl_uncore_imc_dclk,
	[KNL_PCI_UNCORE_EDC_UCLK]	= &knl_uncore_edc_uclk,
	[KNL_PCI_UNCORE_EDC_ECLK]	= &knl_uncore_edc_eclk,
	[KNL_PCI_UNCORE_M2PCIE]		= &knl_uncore_m2pcie,
	[KNL_PCI_UNCORE_IRP]		= &knl_uncore_irp,
	NULL,
};
2401
2402 /*
2403  * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
 * device type. Prior to KNL, each instance of a PMU device type had a unique
2405  * device ID.
2406  *
2407  *      PCI Device ID   Uncore PMU Devices
2408  *      ----------------------------------
2409  *      0x7841          MC0 UClk, MC1 UClk
2410  *      0x7843          MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
2411  *                      MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
2412  *      0x7833          EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
2413  *                      EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
2414  *      0x7835          EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
2415  *                      EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
2416  *      0x7817          M2PCIe
2417  *      0x7814          IRP
2418 */
2419
/*
 * Each UNCORE_PCI_DEV_FULL_DATA() entry carries (dev, func, type index,
 * box index): the fixed PCI device/function of one PMON unit, its slot
 * in knl_pci_uncores[] and its box number within that type.
 */
static const struct pci_device_id knl_uncore_pci_ids[] = {
	{ /* MC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
	},
	{ /* MC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
	},
	{ /* MC0 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
	},
	{ /* MC0 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
	},
	{ /* MC0 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
	},
	{ /* MC1 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
	},
	{ /* MC1 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
	},
	{ /* MC1 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
	},
	{ /* EDC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
	},
	{ /* EDC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
	},
	{ /* EDC2 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
	},
	{ /* EDC3 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
	},
	{ /* EDC4 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
	},
	{ /* EDC5 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
	},
	{ /* EDC6 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
	},
	{ /* EDC7 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
	},
	{ /* EDC0 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
	},
	{ /* EDC1 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
	},
	{ /* EDC2 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
	},
	{ /* EDC3 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
	},
	{ /* EDC4 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
	},
	{ /* EDC5 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
	},
	{ /* EDC6 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
	},
	{ /* EDC7 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
	},
	{ /* M2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
	},
	{ /* end: all zeroes */ }
};
2527
/* Only the ID table is provided here; probe/remove come from common code. */
static struct pci_driver knl_uncore_pci_driver = {
	.name		= "knl_uncore",
	.id_table	= knl_uncore_pci_ids,
};
2532
2533 int knl_uncore_pci_init(void)
2534 {
2535         int ret;
2536
2537         /* All KNL PCI based PMON units are on the same PCI bus except IRP */
2538         ret = snb_pci2phy_map_init(0x7814); /* IRP */
2539         if (ret)
2540                 return ret;
2541         ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
2542         if (ret)
2543                 return ret;
2544         uncore_pci_uncores = knl_pci_uncores;
2545         uncore_pci_driver = &knl_uncore_pci_driver;
2546         return 0;
2547 }
2548
2549 /* end of KNL uncore support */
2550
2551 /* Haswell-EP uncore support */
/* sysfs format attributes for the HSW-EP Ubox PMU. */
static struct attribute *hswep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_filter_tid2.attr,
	&format_attr_filter_cid.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_ubox_formats_attr,
};
2567
2568 static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2569 {
2570         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2571         reg1->reg = HSWEP_U_MSR_PMON_FILTER;
2572         reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
2573         reg1->idx = 0;
2574         return 0;
2575 }
2576
/* Common MSR ops plus the Ubox filter setup and shared-reg constraints. */
static struct intel_uncore_ops hswep_uncore_ubox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_ubox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};
2583
/* HSW-EP Ubox: 2 generic 44-bit counters plus a 48-bit fixed UCLK counter. */
static struct intel_uncore_type hswep_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 44,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_ubox_ops,
	.format_group		= &hswep_uncore_ubox_format_group,
};
2599
/* sysfs format attributes for the HSW-EP C-Box PMU (incl. filter fields). */
static struct attribute *hswep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid3.attr,
	&format_attr_filter_link2.attr,
	&format_attr_filter_state3.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_cbox_formats_attr,
};
2621
/* Counter mask 0x1: counter 0 only; 0x3: counters 0-1. */
static struct event_constraint hswep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
2632
/*
 * Event (event+umask) -> filter-field mapping for the HSW-EP C-Box; the
 * idx bits are decoded by hswep_cbox_filter_mask().
 */
static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	EVENT_EXTRA_END
};
2674
2675 static u64 hswep_cbox_filter_mask(int fields)
2676 {
2677         u64 mask = 0;
2678         if (fields & 0x1)
2679                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2680         if (fields & 0x2)
2681                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2682         if (fields & 0x4)
2683                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2684         if (fields & 0x8)
2685                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2686         if (fields & 0x10) {
2687                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2688                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2689                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2690                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
2691         }
2692         return mask;
2693 }
2694
/*
 * Constraint lookup via the common C-Box helper, parameterized with the
 * HSW-EP filter-field decoder.
 */
static struct event_constraint *
hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
}
2700
/*
 * Collect the filter fields this event needs (from the extra_regs table)
 * and describe the per-box filter register in event->hw.extra_reg.
 * Always returns 0 (no failure paths).
 */
static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	/* OR together the idx bits of every matching table entry. */
	for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		/* This box's filter MSR: FILTER0 plus the per-box stride. */
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
2721
/*
 * Program the 64-bit shared filter value as two 32-bit MSR writes
 * (FILTER0 = low half, FILTER0+1 = high half), then enable the event.
 */
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				  struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 1, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
2736
/* C-Box ops: common SNB-EP MSR box control plus HSW-EP filter handling. */
static struct intel_uncore_ops hswep_uncore_cbox_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= hswep_cbox_hw_config,
	.get_constraint		= hswep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
2748
/*
 * HSW-EP C-Box PMU: up to 18 boxes (trimmed to the core count in
 * hswep_uncore_cpu_init()), one shared filter register per box.
 */
static struct intel_uncore_type hswep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 18,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= hswep_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
2764
2765 /*
2766  * Write SBOX Initialization register bit by bit to avoid spurious #GPs
2767  */
2768 static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
2769 {
2770         unsigned msr = uncore_msr_box_ctl(box);
2771
2772         if (msr) {
2773                 u64 init = SNBEP_PMON_BOX_CTL_INT;
2774                 u64 flags = 0;
2775                 int i;
2776
2777                 for_each_set_bit(i, (unsigned long *)&init, 64) {
2778                         flags |= (1ULL << i);
2779                         wrmsrl(msr, flags);
2780                 }
2781         }
2782 }
2783
/* Common MSR ops, but with the bit-by-bit SBOX init_box from above. */
static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.init_box		= hswep_uncore_sbox_msr_init_box
};
2788
/* sysfs format attributes for the HSW-EP SBOX PMU. */
static struct attribute *hswep_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_sbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_sbox_formats_attr,
};
2803
/*
 * HSW-EP SBOX PMU: 4 boxes by default; trimmed to 2 for some parts in
 * hswep_uncore_cpu_init().
 */
static struct intel_uncore_type hswep_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 44,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};
2817
/*
 * PCU events 0xb-0xe take an extra filter value from config1, programmed
 * into the PCU filter MSR.  Always returns 0.
 */
static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		/*
		 * NOTE(review): if the four filter bands are byte-wide
		 * fields, one would expect 0xff << (8 * reg1->idx) here;
		 * confirm the intended config1 layout before changing.
		 */
		reg1->config = event->attr.config1 & (0xff << reg1->idx);
	}
	return 0;
}
2831
/* Common MSR ops plus the PCU band-filter setup and constraint sharing. */
static struct intel_uncore_ops hswep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
2838
/* HSW-EP power control unit PMU: one box, one shared filter register. */
static struct intel_uncore_type hswep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};
2852
/* All MSR-based HSW-EP uncore PMU types; NULL-terminated. */
static struct intel_uncore_type *hswep_msr_uncores[] = {
	&hswep_uncore_ubox,
	&hswep_uncore_cbox,
	&hswep_uncore_sbox,
	&hswep_uncore_pcu,
	NULL,
};
2860
#define HSWEP_PCU_DID			0x2fc0	/* PCI device ID of the HSW-EP PCU */
#define HSWEP_PCU_CAPID4_OFFET		0x94	/* CAPID4 config-space offset (name typo: "OFFET") */
#define hswep_get_chop(_cap)		(((_cap) >> 6) & 0x3)	/* "chop" field: bits 7:6 of CAPID4 */
2864
2865 static bool hswep_has_limit_sbox(unsigned int device)
2866 {
2867         struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
2868         u32 capid4;
2869
2870         if (!dev)
2871                 return false;
2872
2873         pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4);
2874         if (!hswep_get_chop(capid4))
2875                 return true;
2876
2877         return false;
2878 }
2879
/*
 * Register the Haswell-EP MSR uncores: clamp the CBOX PMU count to the
 * number of cores and drop to two SBOXes on "chop" SKUs.
 */
void hswep_uncore_cpu_init(void)
{
	/* At most one CBOX per core. */
	if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;

	/* Detect 6-8 core systems with only two SBOXes */
	if (hswep_has_limit_sbox(HSWEP_PCU_DID))
		hswep_uncore_sbox.num_boxes = 2;

	uncore_msr_uncores = hswep_msr_uncores;
}
2891
/* Haswell-EP Home Agent PMON: two boxes, standard SNB-EP PCI PMON layout. */
static struct intel_uncore_type hswep_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2899
/*
 * IMC event aliases.  The CAS count scale 6.103515625e-5 is 64/2^20:
 * 64 bytes per CAS transaction, reported in MiB.
 */
static struct uncore_event_desc hswep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
2910
/* Haswell-EP IMC PMON: 8 channels, each with 4 counters plus a fixed DCLK counter. */
static struct intel_uncore_type hswep_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2922
2923 static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
2924
2925 static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
2926 {
2927         struct pci_dev *pdev = box->pci_dev;
2928         struct hw_perf_event *hwc = &event->hw;
2929         u64 count = 0;
2930
2931         pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
2932         pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
2933
2934         return count;
2935 }
2936
/* IRP ops: SNB-EP box control, IVB-EP event control, HSW-EP counter offsets. */
static struct intel_uncore_ops hswep_uncore_irp_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= hswep_uncore_irp_read_counter,
};
2945
/* Haswell-EP IRP PMON; counters are read via hswep_uncore_irp_read_counter(). */
static struct intel_uncore_type hswep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};
2956
/* Haswell-EP QPI PMON: three ports, shared match/mask filter registers. */
static struct intel_uncore_type hswep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
2970
/* R2PCIe events restricted to a subset of counters (event id, allowed-counter mask). */
static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	EVENT_CONSTRAINT_END
};
2992
/* Haswell-EP R2PCIe PMON: one box, constrained event placement. */
static struct intel_uncore_type hswep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= hswep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3001
/* R3QPI events restricted to a subset of counters (event id, allowed-counter mask). */
static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
3038
/* Haswell-EP R3QPI PMON: 3 boxes, only 3 counters, 44-bit wide. */
static struct intel_uncore_type hswep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 44,
	.constraints	= hswep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3047
/* Indices into hswep_pci_uncores[]; also encoded in pci_device_id driver_data. */
enum {
	HSWEP_PCI_UNCORE_HA,
	HSWEP_PCI_UNCORE_IMC,
	HSWEP_PCI_UNCORE_IRP,
	HSWEP_PCI_UNCORE_QPI,
	HSWEP_PCI_UNCORE_R2PCIE,
	HSWEP_PCI_UNCORE_R3QPI,
};
3056
/* PCI-based uncore PMUs on Haswell-EP, indexed by the enum above. */
static struct intel_uncore_type *hswep_pci_uncores[] = {
	[HSWEP_PCI_UNCORE_HA]	= &hswep_uncore_ha,
	[HSWEP_PCI_UNCORE_IMC]	= &hswep_uncore_imc,
	[HSWEP_PCI_UNCORE_IRP]	= &hswep_uncore_irp,
	[HSWEP_PCI_UNCORE_QPI]	= &hswep_uncore_qpi,
	[HSWEP_PCI_UNCORE_R2PCIE]	= &hswep_uncore_r2pcie,
	[HSWEP_PCI_UNCORE_R3QPI]	= &hswep_uncore_r3qpi,
	NULL,
};
3066
/*
 * Haswell-EP uncore PMON PCI devices.  driver_data encodes the
 * hswep_pci_uncores[] index and box instance (or an extra-dev tag
 * for the QPI port filter devices).
 */
static const struct pci_device_id hswep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
3152
/* id_table-only driver handed to the common uncore PCI code (see init below). */
static struct pci_driver hswep_uncore_pci_driver = {
	.name		= "hswep_uncore",
	.id_table	= hswep_uncore_pci_ids,
};
3157
3158 int hswep_uncore_pci_init(void)
3159 {
3160         int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3161         if (ret)
3162                 return ret;
3163         uncore_pci_uncores = hswep_pci_uncores;
3164         uncore_pci_driver = &hswep_uncore_pci_driver;
3165         return 0;
3166 }
3167 /* end of Haswell-EP uncore support */
3168
3169 /* BDX uncore support */
3170
/* Broadwell-EP Ubox PMON: 2 counters plus a fixed UCLK counter, IVB-EP style ops. */
static struct intel_uncore_type bdx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};
3186
/* BDX CBOX events restricted to specific counters (event id, allowed-counter mask). */
static struct event_constraint bdx_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
3194
/* Broadwell-EP CBOX PMON: up to 24 boxes (clamped to core count at init). */
static struct intel_uncore_type bdx_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 24,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= bdx_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
3210
/* Broadwell-EP SBOX PMON; removed entirely at init time on SKUs without SBOXes. */
static struct intel_uncore_type bdx_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};
3224
/* Index of bdx_uncore_sbox below; keep in sync with the array order. */
#define BDX_MSR_UNCORE_SBOX	3

/* MSR-based uncore PMUs on BDX; the SBOX slot may be NULLed by bdx_uncore_cpu_init(). */
static struct intel_uncore_type *bdx_msr_uncores[] = {
	&bdx_uncore_ubox,
	&bdx_uncore_cbox,
	&hswep_uncore_pcu,
	&bdx_uncore_sbox,
	NULL,
};
3234
/* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
static struct event_constraint bdx_uncore_pcu_constraints[] = {
	/* Event 0x80 may only run on counters 1-3 (counter mask 0xe). */
	EVENT_CONSTRAINT(0x80, 0xe, 0x80),
	EVENT_CONSTRAINT_END
};
3240
/* BDX PCU PCI device id, used for the SBOX "chop" detection below. */
#define BDX_PCU_DID			0x6fc0

/*
 * Register the Broadwell-EP MSR uncores: clamp CBOX count to the core
 * count, drop the SBOX PMU on SKUs that have none, and install the
 * BDX-specific PCU constraint.
 */
void bdx_uncore_cpu_init(void)
{
	if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = bdx_msr_uncores;

	/* Detect systems with no SBOXes */
	if ((boot_cpu_data.x86_model == 86) || hswep_has_limit_sbox(BDX_PCU_DID))
		uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;

	/* BDX reuses the HSW-EP PCU type but needs its own constraints. */
	hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
}
3255
/* Broadwell-EP Home Agent PMON: two boxes, standard SNB-EP PCI PMON layout. */
static struct intel_uncore_type bdx_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3263
/* Broadwell-EP IMC PMON: 8 channels; reuses the HSW-EP event descriptions. */
static struct intel_uncore_type bdx_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3275
/* Broadwell-EP IRP PMON; reuses the HSW-EP IRP ops for counter reads. */
static struct intel_uncore_type bdx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};
3286
/* Broadwell-EP QPI PMON: three ports, shared match/mask filter registers. */
static struct intel_uncore_type bdx_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
3300
/* BDX R2PCIe events restricted to a subset of counters (event id, counter mask). */
static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};
3313
/* Broadwell-EP R2PCIe PMON: one box, constrained event placement. */
static struct intel_uncore_type bdx_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3322
/* BDX R3QPI events restricted to a subset of counters (event id, counter mask). */
static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
3356
/* Broadwell-EP R3QPI PMON: 3 boxes, 3 counters each (48-bit, unlike HSW-EP's 44). */
static struct intel_uncore_type bdx_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3365
/* Indices into bdx_pci_uncores[]; also encoded in pci_device_id driver_data. */
enum {
	BDX_PCI_UNCORE_HA,
	BDX_PCI_UNCORE_IMC,
	BDX_PCI_UNCORE_IRP,
	BDX_PCI_UNCORE_QPI,
	BDX_PCI_UNCORE_R2PCIE,
	BDX_PCI_UNCORE_R3QPI,
};
3374
/* PCI-based uncore PMUs on Broadwell-EP, indexed by the enum above. */
static struct intel_uncore_type *bdx_pci_uncores[] = {
	[BDX_PCI_UNCORE_HA]	= &bdx_uncore_ha,
	[BDX_PCI_UNCORE_IMC]	= &bdx_uncore_imc,
	[BDX_PCI_UNCORE_IRP]	= &bdx_uncore_irp,
	[BDX_PCI_UNCORE_QPI]	= &bdx_uncore_qpi,
	[BDX_PCI_UNCORE_R2PCIE]	= &bdx_uncore_r2pcie,
	[BDX_PCI_UNCORE_R3QPI]	= &bdx_uncore_r3qpi,
	NULL,
};
3384
/*
 * Broadwell-EP uncore PMON PCI devices.  driver_data encodes the
 * bdx_pci_uncores[] index and box instance (or an extra-dev tag for
 * the QPI port filter devices).
 */
static const struct pci_device_id bdx_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* QPI Port 2 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   BDX_PCI_QPI_PORT2_FILTER),
	},
	{ /* end: all zeroes */ }
};
3475
/* id_table-only driver handed to the common uncore PCI code (see init below). */
static struct pci_driver bdx_uncore_pci_driver = {
	.name		= "bdx_uncore",
	.id_table	= bdx_uncore_pci_ids,
};
3480
3481 int bdx_uncore_pci_init(void)
3482 {
3483         int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3484
3485         if (ret)
3486                 return ret;
3487         uncore_pci_uncores = bdx_pci_uncores;
3488         uncore_pci_driver = &bdx_uncore_pci_driver;
3489         return 0;
3490 }
3491
3492 /* end of BDX uncore support */
3493
3494 /* SKX uncore support */
3495
/* Skylake-X Ubox PMON: 2 counters plus a fixed UCLK counter, IVB-EP style ops. */
static struct intel_uncore_type skx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};
3510
/* Sysfs format attributes for the SKX CHA PMU (event encoding + filter fields). */
static struct attribute *skx_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_state5.attr,
	&format_attr_filter_rem.attr,
	&format_attr_filter_loc.attr,
	&format_attr_filter_nm.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_not_nm.attr,
	&format_attr_filter_opc_0.attr,
	&format_attr_filter_opc_1.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};
3531
/* "format" sysfs group exposing the CHA attributes above. */
static const struct attribute_group skx_uncore_chabox_format_group = {
	.name = "format",
	.attrs = skx_uncore_cha_formats_attr,
};
3536
/* SKX CHA events limited to counter 0 (event id, allowed-counter mask). */
static struct event_constraint skx_uncore_chabox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};
3542
/*
 * CHA events that need the shared filter register.  The idx field is a
 * selector bitmask consumed by skx_cha_hw_config()/skx_cha_filter_mask().
 */
static struct extra_reg skx_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
	EVENT_EXTRA_END
};
3555
3556 static u64 skx_cha_filter_mask(int fields)
3557 {
3558         u64 mask = 0;
3559
3560         if (fields & 0x1)
3561                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3562         if (fields & 0x2)
3563                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
3564         if (fields & 0x4)
3565                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
3566         if (fields & 0x8) {
3567                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
3568                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
3569                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
3570                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
3571                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
3572                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
3573                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
3574                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
3575                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
3576         }
3577         return mask;
3578 }
3579
/* Constraint lookup via the common CBOX helper, using the SKX CHA filter layout. */
static struct event_constraint *
skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
}
3585
3586 static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
3587 {
3588         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
3589         struct extra_reg *er;
3590         int idx = 0;
3591
3592         for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
3593                 if (er->event != (event->hw.config & er->config_mask))
3594                         continue;
3595                 idx |= er->idx;
3596         }
3597
3598         if (idx) {
3599                 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
3600                             HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
3601                 reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
3602                 reg1->idx = idx;
3603         }
3604         return 0;
3605 }
3606
/* MSR-based box/event callbacks for the SKX CHA PMON unit. */
static struct intel_uncore_ops skx_uncore_chabox_ops = {
	/* There is no frz_en for chabox ctl */
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= skx_cha_hw_config,
	.get_constraint		= skx_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/*
 * SKX CHA uncore PMU type. Note: .num_boxes is intentionally absent here;
 * it is filled in at runtime by skx_uncore_cpu_init() from the CAPID6
 * CHA-enable mask (see skx_count_chabox()).
 */
static struct intel_uncore_type skx_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &skx_uncore_chabox_ops,
	.format_group		= &skx_uncore_chabox_format_group,
};
3634
/* sysfs "format" attributes exposed for the SKX IIO PMU. */
static struct attribute *skx_uncore_iio_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh9.attr,
	&format_attr_ch_mask.attr,
	&format_attr_fc_mask.attr,
	NULL,
};

static const struct attribute_group skx_uncore_iio_format_group = {
	.name = "format",
	.attrs = skx_uncore_iio_formats_attr,
};

/* Counter-placement constraints for specific IIO event codes. */
static struct event_constraint skx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
	EVENT_CONSTRAINT_END
};
3660
3661 static void skx_iio_enable_event(struct intel_uncore_box *box,
3662                                  struct perf_event *event)
3663 {
3664         struct hw_perf_event *hwc = &event->hw;
3665
3666         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
3667 }
3668
/* MSR-based callbacks for the SKX IIO PMON unit. */
static struct intel_uncore_ops skx_uncore_iio_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= skx_iio_enable_event,
	.read_counter		= uncore_msr_read_counter,
};
3677
/*
 * Extract the root-bus number of this IIO stack for @die from the cached
 * SKX_MSR_CPU_BUS_NUMBER value: each pmu_idx owns one BUS_NUM_STRIDE-wide
 * field; the return type truncates to the low 8 bits (the bus number).
 */
static inline u8 skx_iio_stack(struct intel_uncore_pmu *pmu, int die)
{
	return pmu->type->topology[die].configuration >>
	       (pmu->pmu_idx * BUS_NUM_STRIDE);
}
3683
3684 static umode_t
3685 skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
3686 {
3687         struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));
3688
3689         /* Root bus 0x00 is valid only for die 0 AND pmu_idx = 0. */
3690         return (!skx_iio_stack(pmu, die) && pmu->pmu_idx) ? 0 : attr->mode;
3691 }
3692
/*
 * sysfs show handler: print the "segment:bus" PCI location of this IIO
 * stack for the die encoded in the attribute's ->var. The die index was
 * stored as a pointer-sized integer by skx_iio_set_mapping().
 */
static ssize_t skx_iio_mapping_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
	struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
	long die = (long)ea->var;

	return sprintf(buf, "%04x:%02x\n", pmu->type->topology[die].segment,
					   skx_iio_stack(pmu, die));
}
3703
3704 static int skx_msr_cpu_bus_read(int cpu, u64 *topology)
3705 {
3706         u64 msr_value;
3707
3708         if (rdmsrl_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &msr_value) ||
3709                         !(msr_value & SKX_MSR_CPU_BUS_VALID_BIT))
3710                 return -ENXIO;
3711
3712         *topology = msr_value;
3713
3714         return 0;
3715 }
3716
3717 static int die_to_cpu(int die)
3718 {
3719         int res = 0, cpu, current_die;
3720         /*
3721          * Using cpus_read_lock() to ensure cpu is not going down between
3722          * looking at cpu_online_mask.
3723          */
3724         cpus_read_lock();
3725         for_each_online_cpu(cpu) {
3726                 current_die = topology_logical_die_id(cpu);
3727                 if (current_die == die) {
3728                         res = cpu;
3729                         break;
3730                 }
3731         }
3732         cpus_read_unlock();
3733         return res;
3734 }
3735
3736 static int skx_iio_get_topology(struct intel_uncore_type *type)
3737 {
3738         int die, ret = -EPERM;
3739
3740         type->topology = kcalloc(uncore_max_dies(), sizeof(*type->topology),
3741                                  GFP_KERNEL);
3742         if (!type->topology)
3743                 return -ENOMEM;
3744
3745         for (die = 0; die < uncore_max_dies(); die++) {
3746                 ret = skx_msr_cpu_bus_read(die_to_cpu(die),
3747                                            &type->topology[die].configuration);
3748                 if (ret)
3749                         break;
3750
3751                 ret = uncore_die_to_segment(die);
3752                 if (ret < 0)
3753                         break;
3754
3755                 type->topology[die].segment = ret;
3756         }
3757
3758         if (ret < 0) {
3759                 kfree(type->topology);
3760                 type->topology = NULL;
3761         }
3762
3763         return ret;
3764 }
3765
/*
 * Attribute group holding the per-die mapping files; ->attrs is filled in
 * dynamically by skx_iio_set_mapping() and torn down by
 * skx_iio_cleanup_mapping().
 */
static struct attribute_group skx_iio_mapping_group = {
	.is_visible	= skx_iio_mapping_visible,
};

/* attr_update list attached to skx_uncore_iio. */
static const struct attribute_group *skx_iio_attr_update[] = {
	&skx_iio_mapping_group,
	NULL,
};
3774
3775 static int skx_iio_set_mapping(struct intel_uncore_type *type)
3776 {
3777         char buf[64];
3778         int ret;
3779         long die = -1;
3780         struct attribute **attrs = NULL;
3781         struct dev_ext_attribute *eas = NULL;
3782
3783         ret = skx_iio_get_topology(type);
3784         if (ret < 0)
3785                 goto clear_attr_update;
3786
3787         ret = -ENOMEM;
3788
3789         /* One more for NULL. */
3790         attrs = kcalloc((uncore_max_dies() + 1), sizeof(*attrs), GFP_KERNEL);
3791         if (!attrs)
3792                 goto err;
3793
3794         eas = kcalloc(uncore_max_dies(), sizeof(*eas), GFP_KERNEL);
3795         if (!eas)
3796                 goto err;
3797
3798         for (die = 0; die < uncore_max_dies(); die++) {
3799                 sprintf(buf, "die%ld", die);
3800                 sysfs_attr_init(&eas[die].attr.attr);
3801                 eas[die].attr.attr.name = kstrdup(buf, GFP_KERNEL);
3802                 if (!eas[die].attr.attr.name)
3803                         goto err;
3804                 eas[die].attr.attr.mode = 0444;
3805                 eas[die].attr.show = skx_iio_mapping_show;
3806                 eas[die].attr.store = NULL;
3807                 eas[die].var = (void *)die;
3808                 attrs[die] = &eas[die].attr.attr;
3809         }
3810         skx_iio_mapping_group.attrs = attrs;
3811
3812         return 0;
3813 err:
3814         for (; die >= 0; die--)
3815                 kfree(eas[die].attr.attr.name);
3816         kfree(eas);
3817         kfree(attrs);
3818         kfree(type->topology);
3819 clear_attr_update:
3820         type->attr_update = NULL;
3821         return ret;
3822 }
3823
3824 static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
3825 {
3826         struct attribute **attr = skx_iio_mapping_group.attrs;
3827
3828         if (!attr)
3829                 return;
3830
3831         for (; *attr; attr++)
3832                 kfree((*attr)->name);
3833         kfree(attr_to_ext_attr(*skx_iio_mapping_group.attrs));
3834         kfree(skx_iio_mapping_group.attrs);
3835         skx_iio_mapping_group.attrs = NULL;
3836         kfree(type->topology);
3837 }
3838
/* SKX IIO uncore PMU type; exposes the die mapping via attr_update. */
static struct intel_uncore_type skx_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SKX_IIO0_MSR_PMON_CTL0,
	.perf_ctr		= SKX_IIO0_MSR_PMON_CTR0,
	.event_mask		= SKX_IIO_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= SKX_IIO0_MSR_PMON_BOX_CTL,
	.msr_offset		= SKX_IIO_MSR_OFFSET,
	.constraints		= skx_uncore_iio_constraints,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &skx_uncore_iio_format_group,
	.attr_update		= skx_iio_attr_update,
	.set_mapping		= skx_iio_set_mapping,
	.cleanup_mapping	= skx_iio_cleanup_mapping,
};
3857
/* Free-running counter groups provided by the SKX IIO. */
enum perf_uncore_iio_freerunning_type_id {
	SKX_IIO_MSR_IOCLK			= 0,
	SKX_IIO_MSR_BW				= 1,
	SKX_IIO_MSR_UTIL			= 2,

	SKX_IIO_FREERUNNING_TYPE_MAX,
};


/*
 * Free-running counter layout per group. Field order follows
 * struct freerunning_counters (presumably: base MSR, counter stride,
 * box stride, number of counters, counter width in bits — confirm
 * against uncore.h).
 */
static struct freerunning_counters skx_iio_freerunning[] = {
	[SKX_IIO_MSR_IOCLK]	= { 0xa45, 0x1, 0x20, 1, 36 },
	[SKX_IIO_MSR_BW]	= { 0xb00, 0x1, 0x10, 8, 36 },
	[SKX_IIO_MSR_UTIL]	= { 0xb08, 0x1, 0x10, 8, 36 },
};
3872
/*
 * Named event aliases for the IIO free-running counters. The bandwidth
 * events carry a .scale/.unit pair so perf reports MiB directly
 * (3.814697266e-6 = 4/2^20, i.e. 4-byte granularity scaled to MiB).
 */
static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
	/* Free-Running IO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,	"MiB"),
	/* Free-running IIO UTILIZATION Counters */
	INTEL_UNCORE_EVENT_DESC(util_in_port0,		"event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(util_out_port0,		"event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(util_in_port1,		"event=0xff,umask=0x32"),
	INTEL_UNCORE_EVENT_DESC(util_out_port1,		"event=0xff,umask=0x33"),
	INTEL_UNCORE_EVENT_DESC(util_in_port2,		"event=0xff,umask=0x34"),
	INTEL_UNCORE_EVENT_DESC(util_out_port2,		"event=0xff,umask=0x35"),
	INTEL_UNCORE_EVENT_DESC(util_in_port3,		"event=0xff,umask=0x36"),
	INTEL_UNCORE_EVENT_DESC(util_out_port3,		"event=0xff,umask=0x37"),
	{ /* end: all zeroes */ },
};
3912
/*
 * Free-running counters cannot be started/stopped, so only a read
 * callback and hw_config are needed.
 */
static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= uncore_freerunning_hw_config,
};

/* Only event/umask are meaningful for free-running counters. */
static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL,
};

static const struct attribute_group skx_uncore_iio_freerunning_format_group = {
	.name = "format",
	.attrs = skx_uncore_iio_freerunning_formats_attr,
};
3928
/* Pseudo-PMU wrapping the IIO free-running counters (1 clk + 8 BW + 8 util). */
static struct intel_uncore_type skx_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 17,
	.num_boxes		= 6,
	.num_freerunning_types	= SKX_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= skx_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= skx_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
3939
/* Generic SKX format attributes shared by irp/imc/m2m/m2pcie/m3upi. */
static struct attribute *skx_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group skx_uncore_format_group = {
	.name = "format",
	.attrs = skx_uncore_formats_attr,
};
3953
/* SKX IRP uncore PMU; reuses the IIO MSR ops. */
static struct intel_uncore_type skx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SKX_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= SKX_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SKX_IRP0_MSR_PMON_BOX_CTL,
	.msr_offset		= SKX_IRP_MSR_OFFSET,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &skx_uncore_format_group,
};
3967
/* PCU format attributes, including occupancy and frequency-band filters. */
static struct attribute *skx_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute_group skx_uncore_pcu_format_group = {
	.name = "format",
	.attrs = skx_uncore_pcu_formats_attr,
};
3987
/* PCU callbacks: common IVB-EP MSR ops plus HSW-EP/SNB-EP filter handling. */
static struct intel_uncore_ops skx_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
3994
/* SKX PCU (power control unit) uncore PMU. */
static struct intel_uncore_type skx_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &skx_uncore_pcu_ops,
	.format_group		= &skx_uncore_pcu_format_group,
};
4008
/* All MSR-based SKX uncore PMU types, NULL-terminated. */
static struct intel_uncore_type *skx_msr_uncores[] = {
	&skx_uncore_ubox,
	&skx_uncore_chabox,
	&skx_uncore_iio,
	&skx_uncore_iio_free_running,
	&skx_uncore_irp,
	&skx_uncore_pcu,
	NULL,
};
4018
4019 /*
4020  * To determine the number of CHAs, it should read bits 27:0 in the CAPID6
4021  * register which located at Device 30, Function 3, Offset 0x9C. PCI ID 0x2083.
4022  */
4023 #define SKX_CAPID6              0x9c
4024 #define SKX_CHA_BIT_MASK        GENMASK(27, 0)
4025
4026 static int skx_count_chabox(void)
4027 {
4028         struct pci_dev *dev = NULL;
4029         u32 val = 0;
4030
4031         dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
4032         if (!dev)
4033                 goto out;
4034
4035         pci_read_config_dword(dev, SKX_CAPID6, &val);
4036         val &= SKX_CHA_BIT_MASK;
4037 out:
4038         pci_dev_put(dev);
4039         return hweight32(val);
4040 }
4041
/*
 * Hook for the common uncore code: finish CHA setup by counting the
 * enabled boxes from CAPID6, then publish the MSR-based PMU list.
 */
void skx_uncore_cpu_init(void)
{
	skx_uncore_chabox.num_boxes = skx_count_chabox();
	uncore_msr_uncores = skx_msr_uncores;
}
4047
/* SKX IMC (memory controller channel) PCI uncore PMU, incl. fixed DCLK ctr. */
static struct intel_uncore_type skx_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
4064
/* UPI format attributes; uses the extended umask field. */
static struct attribute *skx_upi_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group skx_upi_uncore_format_group = {
	.name = "format",
	.attrs = skx_upi_uncore_formats_attr,
};
4078
4079 static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
4080 {
4081         struct pci_dev *pdev = box->pci_dev;
4082
4083         __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4084         pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4085 }
4086
/* PCI callbacks for the UPI boxes; only init_box is SKX-specific. */
static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
	.init_box	= skx_upi_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

/* SKX UPI (inter-socket link) PCI uncore PMU. */
static struct intel_uncore_type skx_uncore_upi = {
	.name		= "upi",
	.num_counters	= 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_UPI_PCI_PMON_CTR0,
	.event_ctl	= SKX_UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SKX_UPI_CTL_UMASK_EXT,
	.box_ctl	= SKX_UPI_PCI_PMON_BOX_CTL,
	.ops		= &skx_upi_uncore_pci_ops,
	.format_group	= &skx_upi_uncore_format_group,
};
4109
4110 static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4111 {
4112         struct pci_dev *pdev = box->pci_dev;
4113
4114         __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4115         pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4116 }
4117
/* PCI callbacks for the M2M boxes; only init_box is SKX-specific. */
static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
	.init_box	= skx_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

/* SKX M2M (mesh-to-memory) PCI uncore PMU. */
static struct intel_uncore_type skx_uncore_m2m = {
	.name		= "m2m",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_M2M_PCI_PMON_CTR0,
	.event_ctl	= SKX_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SKX_M2M_PCI_PMON_BOX_CTL,
	.ops		= &skx_m2m_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
4139
/* Event 0x23 is restricted to counters 0-1 on M2PCIe. */
static struct event_constraint skx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};

/* SKX M2PCIe (mesh-to-PCIe) PCI uncore PMU. */
static struct intel_uncore_type skx_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m2pcie_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
4158
/* Counter-placement constraints for specific M3UPI event codes. */
static struct event_constraint skx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
	EVENT_CONSTRAINT_END
};

/* SKX M3UPI (UPI mesh interface) PCI uncore PMU. */
static struct intel_uncore_type skx_uncore_m3upi = {
	.name		= "m3upi",
	.num_counters	= 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m3upi_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
4184
/* Indices into skx_pci_uncores[], referenced by the PCI ID table below. */
enum {
	SKX_PCI_UNCORE_IMC,
	SKX_PCI_UNCORE_M2M,
	SKX_PCI_UNCORE_UPI,
	SKX_PCI_UNCORE_M2PCIE,
	SKX_PCI_UNCORE_M3UPI,
};

static struct intel_uncore_type *skx_pci_uncores[] = {
	[SKX_PCI_UNCORE_IMC]	= &skx_uncore_imc,
	[SKX_PCI_UNCORE_M2M]	= &skx_uncore_m2m,
	[SKX_PCI_UNCORE_UPI]	= &skx_uncore_upi,
	[SKX_PCI_UNCORE_M2PCIE]	= &skx_uncore_m2pcie,
	[SKX_PCI_UNCORE_M3UPI]	= &skx_uncore_m3upi,
	NULL,
};
4201
/*
 * PCI device table mapping each uncore device (by device ID and expected
 * devfn) to its PMU type index and box number via
 * UNCORE_PCI_DEV_FULL_DATA(dev, func, type, box).
 */
static const struct pci_device_id skx_uncore_pci_ids[] = {
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
	},
	{ /* M2M0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
	},
	{ /* M2M1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
	},
	{ /* UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
	},
	{ /* UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
	},
	{ /* UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
	},
	{ /* M2PCIe 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* M2PCIe 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
	},
	{ /* M2PCIe 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
	},
	{ /* M2PCIe 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
	},
	{ /* M3UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};
4277
4278
/* Registered by the common uncore code; no probe/remove of its own. */
static struct pci_driver skx_uncore_pci_driver = {
	.name		= "skx_uncore",
	.id_table	= skx_uncore_pci_ids,
};
4283
/*
 * Set up the SKX PCI-based uncore PMUs: build the bus-to-socket map from
 * the 0x2014 ubox device, then publish the PCI PMU list and driver.
 * Returns 0 on success or the mapping error.
 */
int skx_uncore_pci_init(void)
{
	/* need to double check pci address */
	int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);

	if (ret)
		return ret;

	uncore_pci_uncores = skx_pci_uncores;
	uncore_pci_driver = &skx_uncore_pci_driver;
	return 0;
}
4296
4297 /* end of SKX uncore support */
4298
4299 /* SNR uncore support */
4300
4301 static struct intel_uncore_type snr_uncore_ubox = {
4302         .name                   = "ubox",
4303         .num_counters           = 2,
4304         .num_boxes              = 1,
4305         .perf_ctr_bits          = 48,
4306         .fixed_ctr_bits         = 48,
4307         .perf_ctr               = SNR_U_MSR_PMON_CTR0,
4308         .event_ctl              = SNR_U_MSR_PMON_CTL0,
4309         .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,
4310         .fixed_ctr              = SNR_U_MSR_PMON_UCLK_FIXED_CTR,
4311         .fixed_ctl              = SNR_U_MSR_PMON_UCLK_FIXED_CTL,
4312         .ops                    = &ivbep_uncore_msr_ops,
4313         .format_group           = &ivbep_uncore_format_group,
4314 };
4315
/* SNR CHA format attributes, incl. extended umask and 5-bit TID filter. */
static struct attribute *snr_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext2.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid5.attr,
	NULL,
};
static const struct attribute_group snr_uncore_chabox_format_group = {
	.name = "format",
	.attrs = snr_uncore_cha_formats_attr,
};
4330
4331 static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4332 {
4333         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4334
4335         reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 +
4336                     box->pmu->type->msr_offset * box->pmu->pmu_idx;
4337         reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
4338         reg1->idx = 0;
4339
4340         return 0;
4341 }
4342
4343 static void snr_cha_enable_event(struct intel_uncore_box *box,
4344                                    struct perf_event *event)
4345 {
4346         struct hw_perf_event *hwc = &event->hw;
4347         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4348
4349         if (reg1->idx != EXTRA_REG_NONE)
4350                 wrmsrl(reg1->reg, reg1->config);
4351
4352         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
4353 }
4354
/* MSR-based callbacks for the SNR CHA PMON unit. */
static struct intel_uncore_ops snr_uncore_chabox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= snr_cha_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= snr_cha_hw_config,
};

/* SNR CHA uncore PMU type. */
static struct intel_uncore_type snr_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_CHA_MSR_PMON_CTL0,
	.perf_ctr		= SNR_CHA_MSR_PMON_CTR0,
	.box_ctl		= SNR_CHA_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
	.ops			= &snr_uncore_chabox_ops,
	.format_group		= &snr_uncore_chabox_format_group,
};
4379
/* Event format fields exposed for the SNR IIO PMU. */
static struct attribute *snr_uncore_iio_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh9.attr,
	&format_attr_ch_mask2.attr,
	&format_attr_fc_mask2.attr,
	NULL,
};
4390
/* sysfs "format" directory for the SNR IIO PMU. */
static const struct attribute_group snr_uncore_iio_format_group = {
	.name = "format",
	.attrs = snr_uncore_iio_formats_attr,
};
4395
/* SNR IIO (integrated I/O) uncore PMU: 5 boxes, 4 counters each. */
static struct intel_uncore_type snr_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 5,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_IIO_MSR_PMON_CTL0,
	.perf_ctr		= SNR_IIO_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= SNR_IIO_MSR_PMON_BOX_CTL,
	.msr_offset		= SNR_IIO_MSR_OFFSET,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &snr_uncore_iio_format_group,
};
4410
/* SNR IRP (IIO ring port) uncore PMU: 5 boxes, 2 counters each. */
static struct intel_uncore_type snr_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 5,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= SNR_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNR_IRP0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNR_IRP_MSR_OFFSET,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};
4424
/* SNR M2PCIe (mesh-to-PCIe) uncore PMU: 5 boxes, 4 counters each. */
static struct intel_uncore_type snr_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 5,
	.perf_ctr_bits	= 48,
	.event_ctl	= SNR_M2PCIE_MSR_PMON_CTL0,
	.perf_ctr	= SNR_M2PCIE_MSR_PMON_CTR0,
	.box_ctl	= SNR_M2PCIE_MSR_PMON_BOX_CTL,
	.msr_offset	= SNR_M2PCIE_MSR_OFFSET,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_format_group,
};
4438
4439 static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4440 {
4441         struct hw_perf_event *hwc = &event->hw;
4442         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4443         int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
4444
4445         if (ev_sel >= 0xb && ev_sel <= 0xe) {
4446                 reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER;
4447                 reg1->idx = ev_sel - 0xb;
4448                 reg1->config = event->attr.config1 & (0xff << reg1->idx);
4449         }
4450         return 0;
4451 }
4452
/* SNR PCU ops: IVB-EP MSR accessors plus shared filter-register constraints. */
static struct intel_uncore_ops snr_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snr_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
4459
/* SNR PCU (power control unit) uncore PMU: single box, shared filter reg. */
static struct intel_uncore_type snr_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNR_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNR_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNR_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snr_uncore_pcu_ops,
	.format_group		= &skx_uncore_pcu_format_group,
};
4473
/* Indices into snr_iio_freerunning[] for the SNR IIO free-running counters. */
enum perf_uncore_snr_iio_freerunning_type_id {
	SNR_IIO_MSR_IOCLK,
	SNR_IIO_MSR_BW_IN,

	SNR_IIO_FREERUNNING_TYPE_MAX,
};
4480
/*
 * SNR IIO free-running counter layout.  Fields appear to be
 * { counter_base, counter_offset, box_offset, num_counters, bits } —
 * see struct freerunning_counters for the authoritative order.
 */
static struct freerunning_counters snr_iio_freerunning[] = {
	[SNR_IIO_MSR_IOCLK]	= { 0x1eac, 0x1, 0x10, 1, 48 },
	[SNR_IIO_MSR_BW_IN]	= { 0x1f00, 0x1, 0x10, 8, 48 },
};
4485
/* Event descriptions exposed in sysfs for the SNR IIO free-running counters. */
static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
	{ /* end: all zeroes */ },
};
4516
/* Pseudo-PMU wrapping the SNR IIO free-running counters (read-only, no ctl). */
static struct intel_uncore_type snr_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 9,
	.num_boxes		= 5,
	.num_freerunning_types	= SNR_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= snr_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= snr_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
4527
/* All MSR-based uncore PMU types on Snow Ridge; NULL-terminated. */
static struct intel_uncore_type *snr_msr_uncores[] = {
	&snr_uncore_ubox,
	&snr_uncore_chabox,
	&snr_uncore_iio,
	&snr_uncore_irp,
	&snr_uncore_m2pcie,
	&snr_uncore_pcu,
	&snr_uncore_iio_free_running,
	NULL,
};
4538
/* Register the Snow Ridge MSR-based uncore PMUs with the core uncore driver. */
void snr_uncore_cpu_init(void)
{
	uncore_msr_uncores = snr_msr_uncores;
}
4543
/* Initialize an SNR M2M PCI box: mark 8-byte control stride, then reset it. */
static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	/* Counter control registers in this box are 8 bytes apart. */
	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT);
}
4552
/* PCI config-space accessors for the SNR M2M boxes. */
static struct intel_uncore_ops snr_m2m_uncore_pci_ops = {
	.init_box	= snr_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
4561
/* Event format fields exposed for the SNR M2M PMU. */
static struct attribute *snr_m2m_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext3.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
4570
/* sysfs "format" directory for the SNR M2M PMU. */
static const struct attribute_group snr_m2m_uncore_format_group = {
	.name = "format",
	.attrs = snr_m2m_uncore_formats_attr,
};
4575
/* SNR M2M (mesh-to-memory) uncore PMU: single PCI box, 4 counters. */
static struct intel_uncore_type snr_uncore_m2m = {
	.name		= "m2m",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
	.ops		= &snr_m2m_uncore_pci_ops,
	.format_group	= &snr_m2m_uncore_format_group,
};
4589
4590 static void snr_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
4591 {
4592         struct pci_dev *pdev = box->pci_dev;
4593         struct hw_perf_event *hwc = &event->hw;
4594
4595         pci_write_config_dword(pdev, hwc->config_base, (u32)(hwc->config | SNBEP_PMON_CTL_EN));
4596         pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
4597 }
4598
/* PCI accessors for SNR PCIe3: like M2M ops but with 64-bit event enable. */
static struct intel_uncore_ops snr_pcie3_uncore_pci_ops = {
	.init_box	= snr_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snr_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
4607
/* SNR PCIe3 root-port uncore PMU: single PCI box, reuses SKX IIO masks. */
static struct intel_uncore_type snr_uncore_pcie3 = {
	.name		= "pcie3",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_PCIE3_PCI_PMON_CTR0,
	.event_ctl	= SNR_PCIE3_PCI_PMON_CTL0,
	.event_mask	= SKX_IIO_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl	= SNR_PCIE3_PCI_PMON_BOX_CTL,
	.ops		= &snr_pcie3_uncore_pci_ops,
	.format_group	= &skx_uncore_iio_format_group,
};
4621
/* Indices into snr_pci_uncores[], referenced from the PCI device tables. */
enum {
	SNR_PCI_UNCORE_M2M,
	SNR_PCI_UNCORE_PCIE3,
};
4626
/* All PCI-based uncore PMU types on Snow Ridge; NULL-terminated. */
static struct intel_uncore_type *snr_pci_uncores[] = {
	[SNR_PCI_UNCORE_M2M]		= &snr_uncore_m2m,
	[SNR_PCI_UNCORE_PCIE3]		= &snr_uncore_pcie3,
	NULL,
};
4632
/* PCI IDs claimed directly by the SNR uncore driver. */
static const struct pci_device_id snr_uncore_pci_ids[] = {
	{ /* M2M */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
	},
	{ /* end: all zeroes */ }
};
4640
/* Driver stub for devices the uncore core probes itself (no .probe). */
static struct pci_driver snr_uncore_pci_driver = {
	.name		= "snr_uncore",
	.id_table	= snr_uncore_pci_ids,
};
4645
/* PCI IDs handled via the sub-driver path (devices owned by another driver). */
static const struct pci_device_id snr_uncore_pci_sub_ids[] = {
	{ /* PCIe3 RP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0),
	},
	{ /* end: all zeroes */ }
};
4653
/* Sub-driver stub matching devices already bound elsewhere. */
static struct pci_driver snr_uncore_pci_sub_driver = {
	.name		= "snr_uncore_sub",
	.id_table	= snr_uncore_pci_sub_ids,
};
4658
/*
 * Register the Snow Ridge PCI-based uncore PMUs.
 * Returns 0 on success or the error from the bus-to-node mapping setup.
 */
int snr_uncore_pci_init(void)
{
	/* SNR UBOX DID */
	int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID,
					 SKX_GIDNIDMAP, true);

	if (ret)
		return ret;

	uncore_pci_uncores = snr_pci_uncores;
	uncore_pci_driver = &snr_uncore_pci_driver;
	uncore_pci_sub_driver = &snr_uncore_pci_sub_driver;
	return 0;
}
4673
4674 static struct pci_dev *snr_uncore_get_mc_dev(int id)
4675 {
4676         struct pci_dev *mc_dev = NULL;
4677         int pkg;
4678
4679         while (1) {
4680                 mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3451, mc_dev);
4681                 if (!mc_dev)
4682                         break;
4683                 pkg = uncore_pcibus_to_dieid(mc_dev->bus);
4684                 if (pkg == id)
4685                         break;
4686         }
4687         return mc_dev;
4688 }
4689
4690 static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
4691                                        unsigned int box_ctl, int mem_offset)
4692 {
4693         struct pci_dev *pdev = snr_uncore_get_mc_dev(box->dieid);
4694         struct intel_uncore_type *type = box->pmu->type;
4695         resource_size_t addr;
4696         u32 pci_dword;
4697
4698         if (!pdev)
4699                 return;
4700
4701         pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
4702         addr = (pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
4703
4704         pci_read_config_dword(pdev, mem_offset, &pci_dword);
4705         addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;
4706
4707         addr += box_ctl;
4708
4709         box->io_addr = ioremap(addr, type->mmio_map_size);
4710         if (!box->io_addr) {
4711                 pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
4712                 return;
4713         }
4714
4715         writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
4716 }
4717
/* init_box callback for the SNR IMC: map the box at its MEM0 offset. */
static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
{
	__snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box),
				   SNR_IMC_MMIO_MEM0_OFFSET);
}
4723
4724 static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
4725 {
4726         u32 config;
4727
4728         if (!box->io_addr)
4729                 return;
4730
4731         config = readl(box->io_addr);
4732         config |= SNBEP_PMON_BOX_CTL_FRZ;
4733         writel(config, box->io_addr);
4734 }
4735
4736 static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box)
4737 {
4738         u32 config;
4739
4740         if (!box->io_addr)
4741                 return;
4742
4743         config = readl(box->io_addr);
4744         config &= ~SNBEP_PMON_BOX_CTL_FRZ;
4745         writel(config, box->io_addr);
4746 }
4747
4748 static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box,
4749                                            struct perf_event *event)
4750 {
4751         struct hw_perf_event *hwc = &event->hw;
4752
4753         if (!box->io_addr)
4754                 return;
4755
4756         if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
4757                 return;
4758
4759         writel(hwc->config | SNBEP_PMON_CTL_EN,
4760                box->io_addr + hwc->config_base);
4761 }
4762
4763 static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box,
4764                                             struct perf_event *event)
4765 {
4766         struct hw_perf_event *hwc = &event->hw;
4767
4768         if (!box->io_addr)
4769                 return;
4770
4771         if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
4772                 return;
4773
4774         writel(hwc->config, box->io_addr + hwc->config_base);
4775 }
4776
/* MMIO accessors for the SNR IMC boxes. */
static struct intel_uncore_ops snr_uncore_mmio_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= snr_uncore_mmio_disable_box,
	.enable_box	= snr_uncore_mmio_enable_box,
	.disable_event	= snr_uncore_mmio_disable_event,
	.enable_event	= snr_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};
4786
/* Predefined SNR IMC events (CAS counts scaled from cache lines to MiB). */
static struct uncore_event_desc snr_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
4797
/* SNR IMC (integrated memory controller) uncore PMU, MMIO-based: 2 boxes. */
static struct intel_uncore_type snr_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs	= snr_uncore_imc_events,
	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
	.ops		= &snr_uncore_mmio_ops,
	.format_group	= &skx_uncore_format_group,
};
4816
/* Indices into snr_imc_freerunning[] for the SNR IMC free-running counters. */
enum perf_uncore_snr_imc_freerunning_type_id {
	SNR_IMC_DCLK,
	SNR_IMC_DDR,

	SNR_IMC_FREERUNNING_TYPE_MAX,
};
4823
/*
 * SNR IMC free-running counter layout; field order follows
 * struct freerunning_counters (base, counter offset, box offset,
 * counter count, bit width) — see uncore.h for the definition.
 */
static struct freerunning_counters snr_imc_freerunning[] = {
	[SNR_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
	[SNR_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
};
4828
/* Event descriptions for the SNR IMC free-running counters. */
static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,		"event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(read,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(read.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(read.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(write,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(write.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(write.unit,	"MiB"),
	{ /* end: all zeroes */ },
};
4840
/* Free-running counters need only map/unmap, read, and config validation. */
static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};
4847
/* Pseudo-PMU wrapping the SNR IMC free-running counters. */
static struct intel_uncore_type snr_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.num_boxes		= 1,
	.num_freerunning_types	= SNR_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.freerunning		= snr_imc_freerunning,
	.ops			= &snr_uncore_imc_freerunning_ops,
	.event_descs		= snr_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
4859
/* All MMIO-based uncore PMU types on Snow Ridge; NULL-terminated. */
static struct intel_uncore_type *snr_mmio_uncores[] = {
	&snr_uncore_imc,
	&snr_uncore_imc_free_running,
	NULL,
};
4865
/* Register the Snow Ridge MMIO-based uncore PMUs with the core driver. */
void snr_uncore_mmio_init(void)
{
	uncore_mmio_uncores = snr_mmio_uncores;
}
4870
4871 /* end of SNR uncore support */
4872
4873 /* ICX uncore support */
4874
/*
 * Per-box MSR offsets for the ICX CHA filter/control registers, indexed
 * by pmu_idx.  The offsets are not monotonic; the table encodes the
 * hardware's box-to-MSR layout directly.
 */
static unsigned icx_cha_msr_offsets[] = {
	0x2a0, 0x2ae, 0x2bc, 0x2ca, 0x2d8, 0x2e6, 0x2f4, 0x302, 0x310,
	0x31e, 0x32c, 0x33a, 0x348, 0x356, 0x364, 0x372, 0x380, 0x38e,
	0x3aa, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f0, 0x3fe, 0x40c, 0x41a,
	0x428, 0x436, 0x444, 0x452, 0x460, 0x46e, 0x47c, 0x0,   0xe,
	0x1c,  0x2a,  0x38,  0x46,
};
4882
4883 static int icx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4884 {
4885         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4886         bool tie_en = !!(event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN);
4887
4888         if (tie_en) {
4889                 reg1->reg = ICX_C34_MSR_PMON_BOX_FILTER0 +
4890                             icx_cha_msr_offsets[box->pmu->pmu_idx];
4891                 reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
4892                 reg1->idx = 0;
4893         }
4894
4895         return 0;
4896 }
4897
/* MSR accessors for ICX CHA boxes; reuses SNR's filter-aware enable path. */
static struct intel_uncore_ops icx_uncore_chabox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= snr_cha_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= icx_cha_hw_config,
};
4907
/* ICX CHA uncore PMU; num_boxes is filled in at runtime by icx_uncore_cpu_init(). */
static struct intel_uncore_type icx_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_C34_MSR_PMON_CTL0,
	.perf_ctr		= ICX_C34_MSR_PMON_CTR0,
	.box_ctl		= ICX_C34_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_cha_msr_offsets,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &icx_uncore_chabox_ops,
	.format_group		= &snr_uncore_chabox_format_group,
};
4922
/* Per-box MSR offsets shared by the ICX IIO, IRP and M2PCIe PMUs. */
static unsigned icx_msr_offsets[] = {
	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
};
4926
/* ICX IIO events restricted to specific counters (event code -> counter mask). */
static struct event_constraint icx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x03, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	EVENT_CONSTRAINT_END
};
4935
/* ICX IIO uncore PMU: 6 boxes, 4 counters each, with counter constraints. */
static struct intel_uncore_type icx_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_IIO_MSR_PMON_CTL0,
	.perf_ctr		= ICX_IIO_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= ICX_IIO_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_msr_offsets,
	.constraints		= icx_uncore_iio_constraints,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &snr_uncore_iio_format_group,
};
4951
/* ICX IRP uncore PMU: 6 boxes, 2 counters each. */
static struct intel_uncore_type icx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= ICX_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= ICX_IRP0_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_msr_offsets,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};
4965
/* ICX M2PCIe events restricted to specific counters. */
static struct event_constraint icx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};
4972
/* ICX M2PCIe uncore PMU: 6 boxes, 4 counters each. */
static struct intel_uncore_type icx_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.event_ctl	= ICX_M2PCIE_MSR_PMON_CTL0,
	.perf_ctr	= ICX_M2PCIE_MSR_PMON_CTR0,
	.box_ctl	= ICX_M2PCIE_MSR_PMON_BOX_CTL,
	.msr_offsets	= icx_msr_offsets,
	.constraints	= icx_uncore_m2pcie_constraints,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_format_group,
};
4987
/* Indices into icx_iio_freerunning[] for the ICX IIO free-running counters. */
enum perf_uncore_icx_iio_freerunning_type_id {
	ICX_IIO_MSR_IOCLK,
	ICX_IIO_MSR_BW_IN,

	ICX_IIO_FREERUNNING_TYPE_MAX,
};
4994
/* Per-box MSR offsets for the ICX IIO free-running clock counter. */
static unsigned icx_iio_clk_freerunning_box_offsets[] = {
	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
};
4998
/* Per-box MSR offsets for the ICX IIO free-running bandwidth counters. */
static unsigned icx_iio_bw_freerunning_box_offsets[] = {
	0x0, 0x10, 0x20, 0x90, 0xa0, 0xb0,
};
5002
/*
 * ICX IIO free-running counters.  Unlike SNR, the box stride is not
 * uniform, so each entry carries a per-box offset table as its last field.
 */
static struct freerunning_counters icx_iio_freerunning[] = {
	[ICX_IIO_MSR_IOCLK]	= { 0xa55, 0x1, 0x20, 1, 48, icx_iio_clk_freerunning_box_offsets },
	[ICX_IIO_MSR_BW_IN]	= { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
};
5007
/* Event descriptions exposed in sysfs for the ICX IIO free-running counters. */
static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
	{ /* end: all zeroes */ },
};
5038
/* Pseudo-PMU wrapping the ICX IIO free-running counters. */
static struct intel_uncore_type icx_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 9,
	.num_boxes		= 6,
	.num_freerunning_types	= ICX_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= icx_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= icx_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
5049
/* All MSR-based uncore PMU types on Ice Lake-SP; NULL-terminated. */
static struct intel_uncore_type *icx_msr_uncores[] = {
	&skx_uncore_ubox,
	&icx_uncore_chabox,
	&icx_uncore_iio,
	&icx_uncore_irp,
	&icx_uncore_m2pcie,
	&skx_uncore_pcu,
	&icx_uncore_iio_free_running,
	NULL,
};
5060
5061 /*
5062  * To determine the number of CHAs, it should read CAPID6(Low) and CAPID7 (High)
5063  * registers which located at Device 30, Function 3
5064  */
5065 #define ICX_CAPID6              0x9c
5066 #define ICX_CAPID7              0xa0
5067
/*
 * Count the CHA boxes present on this ICX system by reading the CAPID6/7
 * capability registers and counting set bits.  Returns 0 if the device
 * (DID 0x345b) is absent.
 */
static u64 icx_count_chabox(void)
{
	struct pci_dev *dev = NULL;
	u64 caps = 0;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x345b, dev);
	if (!dev)
		goto out;

	/*
	 * Assemble the 64-bit capability mask from two 32-bit config reads:
	 * CAPID6 into the low dword of caps, CAPID7 into the high dword
	 * (x86 is little-endian, so the pointer arithmetic is valid).
	 */
	pci_read_config_dword(dev, ICX_CAPID6, (u32 *)&caps);
	pci_read_config_dword(dev, ICX_CAPID7, (u32 *)&caps + 1);
out:
	/* pci_dev_put(NULL) is a no-op, so the error path is safe. */
	pci_dev_put(dev);
	return hweight64(caps);
}
5083
/*
 * Register the Ice Lake-SP MSR-based uncore PMUs.  The CHA box count is
 * probed from hardware; bail out if it exceeds the MSR offset table.
 */
void icx_uncore_cpu_init(void)
{
	u64 num_boxes = icx_count_chabox();

	if (WARN_ON(num_boxes > ARRAY_SIZE(icx_cha_msr_offsets)))
		return;
	icx_uncore_chabox.num_boxes = num_boxes;
	uncore_msr_uncores = icx_msr_uncores;
}
5093
/* ICX M2M uncore PMU: 4 PCI boxes, reusing the SNR M2M register layout. */
static struct intel_uncore_type icx_uncore_m2m = {
	.name		= "m2m",
	.num_counters	= 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
	.ops		= &snr_m2m_uncore_pci_ops,
	.format_group	= &snr_m2m_uncore_format_group,
};
5107
/* Event format fields exposed for the ICX UPI PMU. */
static struct attribute *icx_upi_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext4.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
5116
/* Groups the UPI format attributes under the "format" sysfs directory. */
static const struct attribute_group icx_upi_uncore_format_group = {
	.name = "format",
	.attrs = icx_upi_uncore_formats_attr,
};
5121
/* ICX UPI link PMU: 3 links, SKX-style ops with ICX register offsets. */
static struct intel_uncore_type icx_uncore_upi = {
	.name           = "upi",
	.num_counters   = 4,
	.num_boxes      = 3,
	.perf_ctr_bits  = 48,
	.perf_ctr       = ICX_UPI_PCI_PMON_CTR0,
	.event_ctl      = ICX_UPI_PCI_PMON_CTL0,
	.event_mask     = SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext = ICX_UPI_CTL_UMASK_EXT,
	.box_ctl        = ICX_UPI_PCI_PMON_BOX_CTL,
	.ops            = &skx_upi_uncore_pci_ops,
	.format_group   = &icx_upi_uncore_format_group,
};
5135
/*
 * M3UPI scheduling constraints: events 0x1c-0x1f may only use counter 0;
 * events 0x40 and 0x4e-0x50 may use counters 0-2.
 */
static struct event_constraint icx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1c, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
	EVENT_CONSTRAINT_END
};
5147
/* ICX M3UPI (mesh-to-UPI) PMU: one box per UPI link, IVB-EP-style PCI ops. */
static struct intel_uncore_type icx_uncore_m3upi = {
	.name           = "m3upi",
	.num_counters   = 4,
	.num_boxes      = 3,
	.perf_ctr_bits  = 48,
	.perf_ctr       = ICX_M3UPI_PCI_PMON_CTR0,
	.event_ctl      = ICX_M3UPI_PCI_PMON_CTL0,
	.event_mask     = SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl        = ICX_M3UPI_PCI_PMON_BOX_CTL,
	.constraints    = icx_uncore_m3upi_constraints,
	.ops            = &ivbep_uncore_pci_ops,
	.format_group   = &skx_uncore_format_group,
};
5161
/* Indices into icx_pci_uncores[], referenced by icx_uncore_pci_ids[].driver_data. */
enum {
	ICX_PCI_UNCORE_M2M,
	ICX_PCI_UNCORE_UPI,
	ICX_PCI_UNCORE_M3UPI,
};
5167
/* PCI-based uncore PMU types on ICX, indexed by the enum above. */
static struct intel_uncore_type *icx_pci_uncores[] = {
	[ICX_PCI_UNCORE_M2M]            = &icx_uncore_m2m,
	[ICX_PCI_UNCORE_UPI]            = &icx_uncore_upi,
	[ICX_PCI_UNCORE_M3UPI]          = &icx_uncore_m3upi,
	NULL,
};
5174
/*
 * PCI IDs of the ICX uncore PMON devices.  driver_data encodes
 * (device, function, uncore type index, box id) via
 * UNCORE_PCI_DEV_FULL_DATA: M2M boxes sit at dev 12-15 fn 0, UPI
 * links at dev 2-4 fn 1, M3UPI links at dev 5-7 fn 1.
 */
static const struct pci_device_id icx_uncore_pci_ids[] = {
	{ /* M2M 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, ICX_PCI_UNCORE_M2M, 0),
	},
	{ /* M2M 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 0, ICX_PCI_UNCORE_M2M, 1),
	},
	{ /* M2M 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, ICX_PCI_UNCORE_M2M, 2),
	},
	{ /* M2M 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, ICX_PCI_UNCORE_M2M, 3),
	},
	{ /* UPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(2, 1, ICX_PCI_UNCORE_UPI, 0),
	},
	{ /* UPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(3, 1, ICX_PCI_UNCORE_UPI, 1),
	},
	{ /* UPI Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 1, ICX_PCI_UNCORE_UPI, 2),
	},
	{ /* M3UPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(5, 1, ICX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(6, 1, ICX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(7, 1, ICX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};
5218
/*
 * Only the ID table is filled in here; the common uncore PCI code uses
 * it to match devices (no probe/remove callbacks are set in this file).
 */
static struct pci_driver icx_uncore_pci_driver = {
	.name           = "icx_uncore",
	.id_table       = icx_uncore_pci_ids,
};
5223
5224 int icx_uncore_pci_init(void)
5225 {
5226         /* ICX UBOX DID */
5227         int ret = snbep_pci2phy_map_init(0x3450, SKX_CPUNODEID,
5228                                          SKX_GIDNIDMAP, true);
5229
5230         if (ret)
5231                 return ret;
5232
5233         uncore_pci_uncores = icx_pci_uncores;
5234         uncore_pci_driver = &icx_uncore_pci_driver;
5235         return 0;
5236 }
5237
5238 static void icx_uncore_imc_init_box(struct intel_uncore_box *box)
5239 {
5240         unsigned int box_ctl = box->pmu->type->box_ctl +
5241                                box->pmu->type->mmio_offset * (box->pmu->pmu_idx % ICX_NUMBER_IMC_CHN);
5242         int mem_offset = (box->pmu->pmu_idx / ICX_NUMBER_IMC_CHN) * ICX_IMC_MEM_STRIDE +
5243                          SNR_IMC_MMIO_MEM0_OFFSET;
5244
5245         __snr_uncore_mmio_init_box(box, box_ctl, mem_offset);
5246 }
5247
/* MMIO ops for the ICX IMC PMON boxes; only init_box is ICX-specific. */
static struct intel_uncore_ops icx_uncore_mmio_ops = {
	.init_box	= icx_uncore_imc_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= snr_uncore_mmio_disable_box,
	.enable_box	= snr_uncore_mmio_enable_box,
	.disable_event	= snr_uncore_mmio_disable_event,
	.enable_event	= snr_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};
5257
/*
 * ICX IMC PMU: 8 boxes (channels across the memory controllers), with a
 * fixed counter in addition to the 4 general-purpose ones; register
 * layout shared with SNR.
 */
static struct intel_uncore_type icx_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
	.ops		= &icx_uncore_mmio_ops,
	.format_group	= &skx_uncore_format_group,
};
5276
/* Free-running counter groups on the ICX IMC: DCLK, DDR and DDRT traffic. */
enum perf_uncore_icx_imc_freerunning_type_id {
	ICX_IMC_DCLK,
	ICX_IMC_DDR,
	ICX_IMC_DDRT,

	ICX_IMC_FREERUNNING_TYPE_MAX,
};
5284
/*
 * Free-running counter layout per group; fields follow struct
 * freerunning_counters: { counter base, counter offset, box offset,
 * num counters, width } — see uncore.h for the exact field order.
 */
static struct freerunning_counters icx_imc_freerunning[] = {
	[ICX_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
	[ICX_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
	[ICX_IMC_DDRT]	= { 0x22a0, 0x8, 0, 2, 48 },
};
5290
/*
 * Event descriptors for the free-running IMC counters.  The scale
 * 6.103515625e-5 equals 64/2^20: each count presumably represents a
 * 64-byte line, so scaled values are reported in MiB.
 */
static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,			"event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(read,			"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(read.scale,		"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(read.unit,		"MiB"),
	INTEL_UNCORE_EVENT_DESC(write,			"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(write.scale,		"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(write.unit,		"MiB"),

	INTEL_UNCORE_EVENT_DESC(ddrt_read,		"event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(ddrt_read.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ddrt_read.unit,		"MiB"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write,		"event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write.unit,	"MiB"),
	{ /* end: all zeroes */ },
};
5309
5310 static void icx_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
5311 {
5312         int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE +
5313                          SNR_IMC_MMIO_MEM0_OFFSET;
5314
5315         __snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box), mem_offset);
5316 }
5317
/* Free-running counters cannot be started/stopped: no enable/disable hooks. */
static struct intel_uncore_ops icx_uncore_imc_freerunning_ops = {
	.init_box	= icx_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};
5324
/*
 * Free-running IMC PMU: one box per memory controller; 5 counters per
 * box (1 DCLK + 2 DDR + 2 DDRT, per icx_imc_freerunning[]).
 */
static struct intel_uncore_type icx_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 5,
	.num_boxes		= 4,
	.num_freerunning_types	= ICX_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.freerunning		= icx_imc_freerunning,
	.ops			= &icx_uncore_imc_freerunning_ops,
	.event_descs		= icx_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
5336
/* MMIO-based uncore PMU types on ICX; NULL-terminated. */
static struct intel_uncore_type *icx_mmio_uncores[] = {
	&icx_uncore_imc,
	&icx_uncore_imc_free_running,
	NULL,
};
5342
/* Install the ICX MMIO-based uncore PMU types. */
void icx_uncore_mmio_init(void)
{
	uncore_mmio_uncores = icx_mmio_uncores;
}
5347
5348 /* end of ICX uncore support */