Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
[linux-2.6-microblaze.git] / arch / x86 / events / intel / uncore_snbep.c
1 /* SandyBridge-EP/IvyTown uncore support */
2 #include "uncore.h"
3
4 /* SNB-EP Box level control */
5 #define SNBEP_PMON_BOX_CTL_RST_CTRL     (1 << 0)
6 #define SNBEP_PMON_BOX_CTL_RST_CTRS     (1 << 1)
7 #define SNBEP_PMON_BOX_CTL_FRZ          (1 << 8)
8 #define SNBEP_PMON_BOX_CTL_FRZ_EN       (1 << 16)
9 #define SNBEP_PMON_BOX_CTL_INT          (SNBEP_PMON_BOX_CTL_RST_CTRL | \
10                                          SNBEP_PMON_BOX_CTL_RST_CTRS | \
11                                          SNBEP_PMON_BOX_CTL_FRZ_EN)
12 /* SNB-EP event control */
13 #define SNBEP_PMON_CTL_EV_SEL_MASK      0x000000ff
14 #define SNBEP_PMON_CTL_UMASK_MASK       0x0000ff00
15 #define SNBEP_PMON_CTL_RST              (1 << 17)
16 #define SNBEP_PMON_CTL_EDGE_DET         (1 << 18)
17 #define SNBEP_PMON_CTL_EV_SEL_EXT       (1 << 21)
18 #define SNBEP_PMON_CTL_EN               (1 << 22)
19 #define SNBEP_PMON_CTL_INVERT           (1 << 23)
20 #define SNBEP_PMON_CTL_TRESH_MASK       0xff000000
21 #define SNBEP_PMON_RAW_EVENT_MASK       (SNBEP_PMON_CTL_EV_SEL_MASK | \
22                                          SNBEP_PMON_CTL_UMASK_MASK | \
23                                          SNBEP_PMON_CTL_EDGE_DET | \
24                                          SNBEP_PMON_CTL_INVERT | \
25                                          SNBEP_PMON_CTL_TRESH_MASK)
26
27 /* SNB-EP Ubox event control */
28 #define SNBEP_U_MSR_PMON_CTL_TRESH_MASK         0x1f000000
29 #define SNBEP_U_MSR_PMON_RAW_EVENT_MASK         \
30                                 (SNBEP_PMON_CTL_EV_SEL_MASK | \
31                                  SNBEP_PMON_CTL_UMASK_MASK | \
32                                  SNBEP_PMON_CTL_EDGE_DET | \
33                                  SNBEP_PMON_CTL_INVERT | \
34                                  SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
35
36 #define SNBEP_CBO_PMON_CTL_TID_EN               (1 << 19)
37 #define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK       (SNBEP_PMON_RAW_EVENT_MASK | \
38                                                  SNBEP_CBO_PMON_CTL_TID_EN)
39
40 /* SNB-EP PCU event control */
41 #define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK     0x0000c000
42 #define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK       0x1f000000
43 #define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT       (1 << 30)
44 #define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET     (1 << 31)
45 #define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK       \
46                                 (SNBEP_PMON_CTL_EV_SEL_MASK | \
47                                  SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
48                                  SNBEP_PMON_CTL_EDGE_DET | \
49                                  SNBEP_PMON_CTL_INVERT | \
50                                  SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
51                                  SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
52                                  SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
53
54 #define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK       \
55                                 (SNBEP_PMON_RAW_EVENT_MASK | \
56                                  SNBEP_PMON_CTL_EV_SEL_EXT)
57
58 /* SNB-EP pci control register */
59 #define SNBEP_PCI_PMON_BOX_CTL                  0xf4
60 #define SNBEP_PCI_PMON_CTL0                     0xd8
61 /* SNB-EP pci counter register */
62 #define SNBEP_PCI_PMON_CTR0                     0xa0
63
64 /* SNB-EP home agent register */
65 #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0        0x40
66 #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1        0x44
67 #define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH       0x48
68 /* SNB-EP memory controller register */
69 #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL         0xf0
70 #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR         0xd0
71 /* SNB-EP QPI register */
72 #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0          0x228
73 #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1          0x22c
74 #define SNBEP_Q_Py_PCI_PMON_PKT_MASK0           0x238
75 #define SNBEP_Q_Py_PCI_PMON_PKT_MASK1           0x23c
76
77 /* SNB-EP Ubox register */
78 #define SNBEP_U_MSR_PMON_CTR0                   0xc16
79 #define SNBEP_U_MSR_PMON_CTL0                   0xc10
80
81 #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL         0xc08
82 #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR         0xc09
83
84 /* SNB-EP Cbo register */
85 #define SNBEP_C0_MSR_PMON_CTR0                  0xd16
86 #define SNBEP_C0_MSR_PMON_CTL0                  0xd10
87 #define SNBEP_C0_MSR_PMON_BOX_CTL               0xd04
88 #define SNBEP_C0_MSR_PMON_BOX_FILTER            0xd14
89 #define SNBEP_CBO_MSR_OFFSET                    0x20
90
91 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID       0x1f
92 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID       0x3fc00
93 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE     0x7c0000
94 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC       0xff800000
95
96 #define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {    \
97         .event = (e),                           \
98         .msr = SNBEP_C0_MSR_PMON_BOX_FILTER,    \
99         .config_mask = (m),                     \
100         .idx = (i)                              \
101 }
102
103 /* SNB-EP PCU register */
104 #define SNBEP_PCU_MSR_PMON_CTR0                 0xc36
105 #define SNBEP_PCU_MSR_PMON_CTL0                 0xc30
106 #define SNBEP_PCU_MSR_PMON_BOX_CTL              0xc24
107 #define SNBEP_PCU_MSR_PMON_BOX_FILTER           0xc34
108 #define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK      0xffffffff
109 #define SNBEP_PCU_MSR_CORE_C3_CTR               0x3fc
110 #define SNBEP_PCU_MSR_CORE_C6_CTR               0x3fd
111
112 /* IVBEP event control */
113 #define IVBEP_PMON_BOX_CTL_INT          (SNBEP_PMON_BOX_CTL_RST_CTRL | \
114                                          SNBEP_PMON_BOX_CTL_RST_CTRS)
115 #define IVBEP_PMON_RAW_EVENT_MASK               (SNBEP_PMON_CTL_EV_SEL_MASK | \
116                                          SNBEP_PMON_CTL_UMASK_MASK | \
117                                          SNBEP_PMON_CTL_EDGE_DET | \
118                                          SNBEP_PMON_CTL_TRESH_MASK)
119 /* IVBEP Ubox */
120 #define IVBEP_U_MSR_PMON_GLOBAL_CTL             0xc00
121 #define IVBEP_U_PMON_GLOBAL_FRZ_ALL             (1 << 31)
122 #define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL           (1 << 29)
123
124 #define IVBEP_U_MSR_PMON_RAW_EVENT_MASK \
125                                 (SNBEP_PMON_CTL_EV_SEL_MASK | \
126                                  SNBEP_PMON_CTL_UMASK_MASK | \
127                                  SNBEP_PMON_CTL_EDGE_DET | \
128                                  SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
129 /* IVBEP Cbo */
130 #define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK               (IVBEP_PMON_RAW_EVENT_MASK | \
131                                                  SNBEP_CBO_PMON_CTL_TID_EN)
132
133 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID               (0x1fULL << 0)
134 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK      (0xfULL << 5)
135 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE     (0x3fULL << 17)
136 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID               (0xffffULL << 32)
137 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC               (0x1ffULL << 52)
138 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6                (0x1ULL << 61)
139 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC                (0x1ULL << 62)
140 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC      (0x1ULL << 63)
141
142 /* IVBEP home agent */
143 #define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST         (1 << 16)
144 #define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK                \
145                                 (IVBEP_PMON_RAW_EVENT_MASK | \
146                                  IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
147 /* IVBEP PCU */
148 #define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK       \
149                                 (SNBEP_PMON_CTL_EV_SEL_MASK | \
150                                  SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
151                                  SNBEP_PMON_CTL_EDGE_DET | \
152                                  SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
153                                  SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
154                                  SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
155 /* IVBEP QPI */
156 #define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK       \
157                                 (IVBEP_PMON_RAW_EVENT_MASK | \
158                                  SNBEP_PMON_CTL_EV_SEL_EXT)
159
160 #define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
161                                 ((1ULL << (n)) - 1)))
162
163 /* Haswell-EP Ubox */
164 #define HSWEP_U_MSR_PMON_CTR0                   0x709
165 #define HSWEP_U_MSR_PMON_CTL0                   0x705
166 #define HSWEP_U_MSR_PMON_FILTER                 0x707
167
168 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL         0x703
169 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR         0x704
170
171 #define HSWEP_U_MSR_PMON_BOX_FILTER_TID         (0x1 << 0)
172 #define HSWEP_U_MSR_PMON_BOX_FILTER_CID         (0x1fULL << 1)
173 #define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
174                                         (HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
175                                          HSWEP_U_MSR_PMON_BOX_FILTER_CID)
176
177 /* Haswell-EP CBo */
178 #define HSWEP_C0_MSR_PMON_CTR0                  0xe08
179 #define HSWEP_C0_MSR_PMON_CTL0                  0xe01
180 #define HSWEP_C0_MSR_PMON_BOX_CTL                       0xe00
181 #define HSWEP_C0_MSR_PMON_BOX_FILTER0           0xe05
182 #define HSWEP_CBO_MSR_OFFSET                    0x10
183
184
185 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID               (0x3fULL << 0)
186 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK      (0xfULL << 6)
187 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE     (0x7fULL << 17)
188 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID               (0xffffULL << 32)
189 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC               (0x1ffULL << 52)
190 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6                (0x1ULL << 61)
191 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC                (0x1ULL << 62)
192 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC      (0x1ULL << 63)
193
194
195 /* Haswell-EP Sbox */
196 #define HSWEP_S0_MSR_PMON_CTR0                  0x726
197 #define HSWEP_S0_MSR_PMON_CTL0                  0x721
198 #define HSWEP_S0_MSR_PMON_BOX_CTL                       0x720
199 #define HSWEP_SBOX_MSR_OFFSET                   0xa
200 #define HSWEP_S_MSR_PMON_RAW_EVENT_MASK         (SNBEP_PMON_RAW_EVENT_MASK | \
201                                                  SNBEP_CBO_PMON_CTL_TID_EN)
202
203 /* Haswell-EP PCU */
204 #define HSWEP_PCU_MSR_PMON_CTR0                 0x717
205 #define HSWEP_PCU_MSR_PMON_CTL0                 0x711
206 #define HSWEP_PCU_MSR_PMON_BOX_CTL              0x710
207 #define HSWEP_PCU_MSR_PMON_BOX_FILTER           0x715
208
209 /* KNL Ubox */
210 #define KNL_U_MSR_PMON_RAW_EVENT_MASK \
211                                         (SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
212                                                 SNBEP_CBO_PMON_CTL_TID_EN)
213 /* KNL CHA */
214 #define KNL_CHA_MSR_OFFSET                      0xc
215 #define KNL_CHA_MSR_PMON_CTL_QOR                (1 << 16)
216 #define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
217                                         (SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
218                                          KNL_CHA_MSR_PMON_CTL_QOR)
219 #define KNL_CHA_MSR_PMON_BOX_FILTER_TID         0x1ff
220 #define KNL_CHA_MSR_PMON_BOX_FILTER_STATE       (7 << 18)
221 #define KNL_CHA_MSR_PMON_BOX_FILTER_OP          (0xfffffe2aULL << 32)
222
223 /* KNL EDC/MC UCLK */
224 #define KNL_UCLK_MSR_PMON_CTR0_LOW              0x400
225 #define KNL_UCLK_MSR_PMON_CTL0                  0x420
226 #define KNL_UCLK_MSR_PMON_BOX_CTL               0x430
227 #define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW        0x44c
228 #define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL        0x454
229 #define KNL_PMON_FIXED_CTL_EN                   0x1
230
231 /* KNL EDC */
232 #define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW         0xa00
233 #define KNL_EDC0_ECLK_MSR_PMON_CTL0             0xa20
234 #define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL          0xa30
235 #define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW   0xa3c
236 #define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL   0xa44
237
238 /* KNL MC */
239 #define KNL_MC0_CH0_MSR_PMON_CTR0_LOW           0xb00
240 #define KNL_MC0_CH0_MSR_PMON_CTL0               0xb20
241 #define KNL_MC0_CH0_MSR_PMON_BOX_CTL            0xb30
242 #define KNL_MC0_CH0_MSR_PMON_FIXED_LOW          0xb3c
243 #define KNL_MC0_CH0_MSR_PMON_FIXED_CTL          0xb44
244
245 /* KNL IRP */
246 #define KNL_IRP_PCI_PMON_BOX_CTL                0xf0
247 #define KNL_IRP_PCI_PMON_RAW_EVENT_MASK         (SNBEP_PMON_RAW_EVENT_MASK | \
248                                                  KNL_CHA_MSR_PMON_CTL_QOR)
249 /* KNL PCU */
250 #define KNL_PCU_PMON_CTL_EV_SEL_MASK            0x0000007f
251 #define KNL_PCU_PMON_CTL_USE_OCC_CTR            (1 << 7)
252 #define KNL_PCU_MSR_PMON_CTL_TRESH_MASK         0x3f000000
253 #define KNL_PCU_MSR_PMON_RAW_EVENT_MASK \
254                                 (KNL_PCU_PMON_CTL_EV_SEL_MASK | \
255                                  KNL_PCU_PMON_CTL_USE_OCC_CTR | \
256                                  SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
257                                  SNBEP_PMON_CTL_EDGE_DET | \
258                                  SNBEP_CBO_PMON_CTL_TID_EN | \
259                                  SNBEP_PMON_CTL_INVERT | \
260                                  KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
261                                  SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
262                                  SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
263
/*
 * sysfs "format" attribute definitions.  Each entry maps a user-visible
 * event-format field name to a bit range inside perf_event_attr::config,
 * ::config1 or ::config2.  Numeric suffixes (thresh8/thresh6/thresh5,
 * filter_tid2/3/4, ...) are per-microarchitecture variants of the same
 * logical field that differ only in width or bit position.
 */
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
325
326 static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
327 {
328         struct pci_dev *pdev = box->pci_dev;
329         int box_ctl = uncore_pci_box_ctl(box);
330         u32 config = 0;
331
332         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
333                 config |= SNBEP_PMON_BOX_CTL_FRZ;
334                 pci_write_config_dword(pdev, box_ctl, config);
335         }
336 }
337
338 static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
339 {
340         struct pci_dev *pdev = box->pci_dev;
341         int box_ctl = uncore_pci_box_ctl(box);
342         u32 config = 0;
343
344         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
345                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
346                 pci_write_config_dword(pdev, box_ctl, config);
347         }
348 }
349
350 static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
351 {
352         struct pci_dev *pdev = box->pci_dev;
353         struct hw_perf_event *hwc = &event->hw;
354
355         pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
356 }
357
358 static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
359 {
360         struct pci_dev *pdev = box->pci_dev;
361         struct hw_perf_event *hwc = &event->hw;
362
363         pci_write_config_dword(pdev, hwc->config_base, hwc->config);
364 }
365
/*
 * Read a 64-bit PMON counter exposed as two consecutive 32-bit PCI
 * config registers: low dword at event_base, high dword at +4.
 * The halves are stored straight into the u64 through a u32 pointer,
 * which relies on little-endian layout - fine here, this file is
 * x86-only.
 * NOTE(review): the two reads are not atomic with respect to the
 * hardware, so a low-dword rollover between them could tear the value;
 * presumably callers tolerate/resample this - confirm against the
 * generic uncore read path.
 */
static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

	return count;
}
377
378 static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
379 {
380         struct pci_dev *pdev = box->pci_dev;
381         int box_ctl = uncore_pci_box_ctl(box);
382
383         pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
384 }
385
386 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
387 {
388         u64 config;
389         unsigned msr;
390
391         msr = uncore_msr_box_ctl(box);
392         if (msr) {
393                 rdmsrl(msr, config);
394                 config |= SNBEP_PMON_BOX_CTL_FRZ;
395                 wrmsrl(msr, config);
396         }
397 }
398
399 static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
400 {
401         u64 config;
402         unsigned msr;
403
404         msr = uncore_msr_box_ctl(box);
405         if (msr) {
406                 rdmsrl(msr, config);
407                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
408                 wrmsrl(msr, config);
409         }
410 }
411
412 static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
413 {
414         struct hw_perf_event *hwc = &event->hw;
415         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
416
417         if (reg1->idx != EXTRA_REG_NONE)
418                 wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
419
420         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
421 }
422
423 static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
424                                         struct perf_event *event)
425 {
426         struct hw_perf_event *hwc = &event->hw;
427
428         wrmsrl(hwc->config_base, hwc->config);
429 }
430
431 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
432 {
433         unsigned msr = uncore_msr_box_ctl(box);
434
435         if (msr)
436                 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
437 }
438
/* Generic SNB-EP format attributes (8-bit threshold variant). */
static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
447
/* Ubox format attributes: like the generic set but a 5-bit threshold. */
static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};
456
/* Cbox format attributes: adds tid_en and the filter register fields. */
static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};
470
/* PCU format attributes: occupancy select/invert/edge and band filters. */
static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};
485
/*
 * QPI format attributes: extended event select plus the packet
 * match/mask register fields (config1 = match, config2 = mask).
 */
static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
512
/*
 * Pre-defined IMC events.  The CAS-count scale 6.103515625e-5 is
 * 64 / 2^20: each CAS transfers one 64-byte cache line, so the scaled
 * value is in MiB (matching the "MiB" unit below).
 */
static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
523
/* Pre-defined QPI events (drs_data/ncb_data use the extended event bit:
 * 0x1xx encodes event select bit 21 via the event_ext format). */
static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};
531
/* sysfs "format" directories, one per box type's attribute set. */
static struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};
556
/*
 * Common MSR box ops.  The two-level macro exists so later
 * microarchitectures can reuse the enable/disable/read callbacks while
 * supplying their own init_box.
 */
#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),			\
	.init_box	= snbep_uncore_msr_init_box		\

static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};
571
/*
 * Common PCI box ops.  enable_event is deliberately left out so box
 * types with special enable paths can override just that callback.
 */
#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()			\
	.init_box	= snbep_uncore_pci_init_box,		\
	.disable_box	= snbep_uncore_pci_disable_box,		\
	.enable_box	= snbep_uncore_pci_enable_box,		\
	.disable_event	= snbep_uncore_pci_disable_event,	\
	.read_counter	= snbep_uncore_pci_read_counter
578
579 static struct intel_uncore_ops snbep_uncore_pci_ops = {
580         SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
581         .enable_event   = snbep_uncore_pci_enable_event,        \
582 };
583
/*
 * Cbox counter constraints: for each event code, the bitmask of
 * counters that may host it.  Event 0x1f needs the overlap variant
 * because its mask (0xe) shares bits with other constraint masks,
 * which forces the scheduler into a more expensive allocation path.
 */
static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};
613
/* R2PCIe counter constraints (event code -> allowed counter mask). */
static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};
627
/* R3QPI counter constraints (event code -> allowed counter mask). */
static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
659
/*
 * SNB-EP Ubox: one box with two 44-bit general counters plus a 48-bit
 * fixed UCLK counter, all MSR-accessed via the common MSR ops.
 */
static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};
674
/*
 * Cbox extra-register table: maps event/umask encodings that need the
 * shared filter MSR to (filter-field mask, allocation index) pairs.
 * Each entry's idx encodes which filter slots (see
 * snbep_cbox_put_constraint) the event must reserve.
 */
static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};
703
704 static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
705 {
706         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
707         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
708         int i;
709
710         if (uncore_box_is_fake(box))
711                 return;
712
713         for (i = 0; i < 5; i++) {
714                 if (reg1->alloc & (0x1 << i))
715                         atomic_sub(1 << (i * 6), &er->ref);
716         }
717         reg1->alloc = 0;
718 }
719
/*
 * Reserve the shared C-Box filter fields needed by @event.
 *
 * reg1->idx is a bitmask of the five filter fields the event wants;
 * each field has a 6-bit reference count packed into er->ref.  A field
 * may be shared between events only if they program the identical value
 * into it (the config comparison below, done under er->lock).  On a
 * conflict, all references taken so far are rolled back and the empty
 * constraint is returned so the event cannot be scheduled on this box.
 * Returns NULL on success (no extra scheduling constraint).
 */
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		/* already holding a reference for this field (real box) */
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		/* free, or already programmed with a matching value */
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;	/* conflicting value -> roll back below */
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	/* remember what we took, so put_constraint() can undo it */
	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	/* drop only the references taken in this call */
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}
766
767 static u64 snbep_cbox_filter_mask(int fields)
768 {
769         u64 mask = 0;
770
771         if (fields & 0x1)
772                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
773         if (fields & 0x2)
774                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
775         if (fields & 0x4)
776                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
777         if (fields & 0x8)
778                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
779
780         return mask;
781 }
782
/* SNB-EP wrapper: shared filter allocation with the SNB-EP field layout */
static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}
788
/*
 * Work out which filter fields @event needs by matching it against the
 * extra-regs table, and record the per-box filter MSR address, the
 * requested filter value (config1) and the field bitmask in the event's
 * extra_reg.  Always succeeds.
 */
static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	/* several table entries may match; their field masks accumulate */
	for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
809
/* C-Box MSR ops: common accessors plus shared-filter management */
static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/*
 * One C-Box per core; num_boxes is clamped to the actual core count in
 * snbep_uncore_cpu_init().  One shared reg: the per-box filter MSR.
 */
static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};
832
833 static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
834 {
835         struct hw_perf_event *hwc = &event->hw;
836         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
837         u64 config = reg1->config;
838
839         if (new_idx > reg1->idx)
840                 config <<= 8 * (new_idx - reg1->idx);
841         else
842                 config >>= 8 * (reg1->idx - new_idx);
843
844         if (modify) {
845                 hwc->config += new_idx - reg1->idx;
846                 reg1->config = config;
847                 reg1->idx = new_idx;
848         }
849         return config;
850 }
851
/*
 * Reserve a byte slot in the shared PCU filter register for the event's
 * band threshold.  Each of the four slots has an 8-bit reference count
 * in er->ref; a slot can be shared when the programmed value matches.
 * If the preferred slot conflicts, try the remaining slots in turn
 * (rotating via snbep_pcu_alter_er()); only if all four conflict is the
 * empty constraint returned.  Returns NULL on success.
 */
static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	/* nothing to reserve, or already holding a slot on a real box */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	/* slot free, or occupied with the identical value */
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		/* try the next slot; give up once we wrap to the start */
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		/* commit the slot move and remember the reference */
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}
893
/* Drop the reference on the PCU filter byte slot taken in get_constraint() */
static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];

	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	atomic_sub(1 << (reg1->idx * 8), &er->ref);
	reg1->alloc = 0;
}
905
906 static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
907 {
908         struct hw_perf_event *hwc = &event->hw;
909         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
910         int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
911
912         if (ev_sel >= 0xb && ev_sel <= 0xe) {
913                 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
914                 reg1->idx = ev_sel - 0xb;
915                 reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
916         }
917         return 0;
918 }
919
/* PCU MSR ops: common accessors plus band-filter slot management */
static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* Power Control Unit PMU; the single shared reg is the band filter */
static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};
940
/* All SNB-EP MSR-based uncore PMUs; NULL-terminated */
static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};
947
/* Register the MSR uncores, clamping the C-Box count to the core count */
void snbep_uncore_cpu_init(void)
{
	if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = snbep_msr_uncores;
}
954
/* Slot indices into uncore_extra_pci_dev[pkg].dev[] for auxiliary devices */
enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	HSWEP_PCI_PCU_3,
};
960
/*
 * QPI hw_config: event 0x38 supports packet matching; stash the user's
 * match (config1) and mask (config2) values plus the register addresses
 * so snbep_qpi_enable_event() can program the companion filter device.
 */
static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
		reg1->idx = 0;
		reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
		reg1->config = event->attr.config1;
		reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
		reg2->config = event->attr.config2;
	}
	return 0;
}
976
/*
 * Enable a QPI event.  If packet matching was requested (reg1->idx set
 * by snbep_qpi_hw_config()), first write the 64-bit match and mask
 * values into the per-port filter PCI device of this package, as two
 * 32-bit config writes each, then enable the counter itself.
 */
static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		/* pick PORT0 or PORT1 filter slot based on which QPI box this is */
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		int pkg = topology_phys_to_logical_pkg(box->pci_phys_id);
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[pkg].dev[idx];

		/* the filter device may be absent; skip matching silently then */
		if (filter_pdev) {
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1003
/* QPI PCI ops: common accessors plus packet match/mask handling */
static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

/* Register layout shared by all plain SNB-EP PCI uncore boxes */
#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group
1019
/* Home Agent PMU (PCI) */
static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* Integrated Memory Controller PMU: one box per channel, plus a fixed (DCLK) counter */
static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* QPI link layer PMU: one box per port; needs its own ops for packet matching */
static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};
1054
1055
/* R2PCIe ring-to-PCIe interface PMU */
static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* R3QPI ring-to-QPI interface PMU: one box per link */
static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1073
/* Indices into snbep_pci_uncores[], referenced by the PCI id table below */
enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};

/* All SNB-EP PCI-based uncore PMUs; NULL-terminated */
static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};
1090
/*
 * PCI device ids of the SNB-EP uncore boxes; driver_data encodes the
 * (uncore type, box index) pair, or an UNCORE_EXTRA_PCI_DEV slot for
 * the auxiliary QPI filter devices.
 */
static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
1144
/* Skeleton driver: the uncore core layer does the probing from id_table */
static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};
1149
1150 /*
1151  * build pci bus to socket mapping
1152  */
1153 static int snbep_pci2phy_map_init(int devid)
1154 {
1155         struct pci_dev *ubox_dev = NULL;
1156         int i, bus, nodeid, segment;
1157         struct pci2phy_map *map;
1158         int err = 0;
1159         u32 config = 0;
1160
1161         while (1) {
1162                 /* find the UBOX device */
1163                 ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
1164                 if (!ubox_dev)
1165                         break;
1166                 bus = ubox_dev->bus->number;
1167                 /* get the Node ID of the local register */
1168                 err = pci_read_config_dword(ubox_dev, 0x40, &config);
1169                 if (err)
1170                         break;
1171                 nodeid = config;
1172                 /* get the Node ID mapping */
1173                 err = pci_read_config_dword(ubox_dev, 0x54, &config);
1174                 if (err)
1175                         break;
1176
1177                 segment = pci_domain_nr(ubox_dev->bus);
1178                 raw_spin_lock(&pci2phy_map_lock);
1179                 map = __find_pci2phy_map(segment);
1180                 if (!map) {
1181                         raw_spin_unlock(&pci2phy_map_lock);
1182                         err = -ENOMEM;
1183                         break;
1184                 }
1185
1186                 /*
1187                  * every three bits in the Node ID mapping register maps
1188                  * to a particular node.
1189                  */
1190                 for (i = 0; i < 8; i++) {
1191                         if (nodeid == ((config >> (3 * i)) & 0x7)) {
1192                                 map->pbus_to_physid[bus] = i;
1193                                 break;
1194                         }
1195                 }
1196                 raw_spin_unlock(&pci2phy_map_lock);
1197         }
1198
1199         if (!err) {
1200                 /*
1201                  * For PCI bus with no UBOX device, find the next bus
1202                  * that has UBOX device and use its mapping.
1203                  */
1204                 raw_spin_lock(&pci2phy_map_lock);
1205                 list_for_each_entry(map, &pci2phy_map_head, list) {
1206                         i = -1;
1207                         for (bus = 255; bus >= 0; bus--) {
1208                                 if (map->pbus_to_physid[bus] >= 0)
1209                                         i = map->pbus_to_physid[bus];
1210                                 else
1211                                         map->pbus_to_physid[bus] = i;
1212                         }
1213                 }
1214                 raw_spin_unlock(&pci2phy_map_lock);
1215         }
1216
1217         pci_dev_put(ubox_dev);
1218
1219         return err ? pcibios_err_to_errno(err) : 0;
1220 }
1221
/* Build the bus->socket map from the SNB-EP UBOX (0x3ce0), then register the PCI uncores */
int snbep_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x3ce0);
	if (ret)
		return ret;
	uncore_pci_uncores = snbep_pci_uncores;
	uncore_pci_driver = &snbep_uncore_pci_driver;
	return 0;
}
/* end of Sandy Bridge-EP uncore support */
1231 /* end of Sandy Bridge-EP uncore support */
1232
1233 /* IvyTown uncore support */
/* Initialize an IVT MSR box: reset counters/control and enable freeze */
static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);
	if (msr)
		wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
}

/* Initialize an IVT PCI box: same reset via config space */
static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}
1247
/* IVT MSR ops reuse the SNB-EP accessors; only init_box differs */
#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= ivbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

/* IVT PCI ops likewise reuse the SNB-EP accessors except init_box */
static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

/* Register layout shared by all plain IVT PCI uncore boxes */
#define IVBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,		\
	.format_group	= &ivbep_uncore_format_group
1276
/* Default IVT event format (8-bit threshold) */
static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

/* U-box format: only a 5-bit threshold */
static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

/* C-Box format: adds the tid_en bit and the shared-filter fields */
static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

/* PCU format: occupancy select/invert/edge plus the four band filters */
static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

/* QPI format: extended event select plus packet match/mask fields */
static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
1351
/* sysfs "format" groups exposing the attribute arrays above */
static struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};
1376
/* IVT U-box PMU: same MSRs as SNB-EP, IVT event mask and ops */
static struct intel_uncore_type ivbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_ubox_format_group,
};
1391
/*
 * IVT C-Box filter usage table; the third argument is the field bitmask
 * decoded by ivbep_cbox_filter_mask() (0x1 TID, 0x2 LINK, 0x4 STATE,
 * 0x8 NID, 0x10 OPC/NC/C6/ISOC).
 */
static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	EVENT_EXTRA_END
};
1432
1433 static u64 ivbep_cbox_filter_mask(int fields)
1434 {
1435         u64 mask = 0;
1436
1437         if (fields & 0x1)
1438                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1439         if (fields & 0x2)
1440                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1441         if (fields & 0x4)
1442                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1443         if (fields & 0x8)
1444                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1445         if (fields & 0x10) {
1446                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1447                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1448                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1449                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1450         }
1451
1452         return mask;
1453 }
1454
/*
 * Cbox constraint lookup for IvyTown: defer to the common SNB-EP helper,
 * parameterized with the IVT-specific filter mask function.
 */
static struct event_constraint *
ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
}
1460
1461 static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1462 {
1463         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1464         struct extra_reg *er;
1465         int idx = 0;
1466
1467         for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
1468                 if (er->event != (event->hw.config & er->config_mask))
1469                         continue;
1470                 idx |= er->idx;
1471         }
1472
1473         if (idx) {
1474                 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1475                         SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1476                 reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
1477                 reg1->idx = idx;
1478         }
1479         return 0;
1480 }
1481
/*
 * Enable a Cbox event: program the (shared) filter value first, then set
 * the enable bit in the event's control MSR.
 */
static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

        if (reg1->idx != EXTRA_REG_NONE) {
                u64 filter = uncore_shared_reg_config(box, 0);
                /*
                 * The 64-bit filter value is split across two MSRs: the low
                 * half at reg1->reg and the high half at a second MSR 6
                 * above it (NOTE(review): +6 looks intentional for IVT's
                 * filter1 layout — confirm against the uncore PMON manual).
                 */
                wrmsrl(reg1->reg, filter & 0xffffffff);
                wrmsrl(reg1->reg + 6, filter >> 32);
        }

        wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1495
/*
 * Cbox PMU callbacks: common SNB-EP MSR handlers plus IVT-specific box
 * init, filter-aware event enable and constraint management.
 */
static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
        .init_box               = ivbep_uncore_msr_init_box,
        .disable_box            = snbep_uncore_msr_disable_box,
        .enable_box             = snbep_uncore_msr_enable_box,
        .disable_event          = snbep_uncore_msr_disable_event,
        .enable_event           = ivbep_cbox_enable_event,
        .read_counter           = uncore_msr_read_counter,
        .hw_config              = ivbep_cbox_hw_config,
        .get_constraint         = ivbep_cbox_get_constraint,
        .put_constraint         = snbep_cbox_put_constraint,
};

/* IvyTown Cbox (LLC coherence engine) PMU: up to 15 boxes, one per core. */
static struct intel_uncore_type ivbep_uncore_cbox = {
        .name                   = "cbox",
        .num_counters           = 4,
        .num_boxes              = 15,
        .perf_ctr_bits          = 44,
        .event_ctl              = SNBEP_C0_MSR_PMON_CTL0,
        .perf_ctr               = SNBEP_C0_MSR_PMON_CTR0,
        .event_mask             = IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_C0_MSR_PMON_BOX_CTL,
        .msr_offset             = SNBEP_CBO_MSR_OFFSET,
        .num_shared_regs        = 1,    /* the shared filter register */
        .constraints            = snbep_uncore_cbox_constraints,
        .ops                    = &ivbep_uncore_cbox_ops,
        .format_group           = &ivbep_uncore_cbox_format_group,
};
1523
/* PCU (power control unit) callbacks: common IVT MSR ops plus the SNB-EP
 * PCU occupancy-filter hw_config/constraint handlers. */
static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
        IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
        .hw_config              = snbep_pcu_hw_config,
        .get_constraint         = snbep_pcu_get_constraint,
        .put_constraint         = snbep_pcu_put_constraint,
};

/* IvyTown PCU PMU: single box, 4 generic 48-bit counters. */
static struct intel_uncore_type ivbep_uncore_pcu = {
        .name                   = "pcu",
        .num_counters           = 4,
        .num_boxes              = 1,
        .perf_ctr_bits          = 48,
        .perf_ctr               = SNBEP_PCU_MSR_PMON_CTR0,
        .event_ctl              = SNBEP_PCU_MSR_PMON_CTL0,
        .event_mask             = IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_PCU_MSR_PMON_BOX_CTL,
        .num_shared_regs        = 1,
        .ops                    = &ivbep_uncore_pcu_ops,
        .format_group           = &ivbep_uncore_pcu_format_group,
};
1544
/* All MSR-accessed IvyTown uncore PMU types; NULL-terminated. */
static struct intel_uncore_type *ivbep_msr_uncores[] = {
        &ivbep_uncore_ubox,
        &ivbep_uncore_cbox,
        &ivbep_uncore_pcu,
        NULL,
};
1551
1552 void ivbep_uncore_cpu_init(void)
1553 {
1554         if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1555                 ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1556         uncore_msr_uncores = ivbep_msr_uncores;
1557 }
1558
/* IvyTown Home Agent PMU: two boxes (one per memory controller). */
static struct intel_uncore_type ivbep_uncore_ha = {
        .name           = "ha",
        .num_counters   = 4,
        .num_boxes      = 2,
        .perf_ctr_bits  = 48,
        IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* IvyTown IMC PMU: 8 channels, plus a fixed DCLK cycle counter per box. */
static struct intel_uncore_type ivbep_uncore_imc = {
        .name           = "imc",
        .num_counters   = 4,
        .num_boxes      = 8,
        .perf_ctr_bits  = 48,
        .fixed_ctr_bits = 48,
        .fixed_ctr      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
        .fixed_ctl      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
        .event_descs    = snbep_uncore_imc_events,
        IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1578
/* registers in IRP boxes are not properly aligned */
/* PCI config-space offsets of the four IRP event control registers. */
static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
/* PCI config-space offsets of the four IRP counters (each 64-bit). */
static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1582
1583 static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1584 {
1585         struct pci_dev *pdev = box->pci_dev;
1586         struct hw_perf_event *hwc = &event->hw;
1587
1588         pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
1589                                hwc->config | SNBEP_PMON_CTL_EN);
1590 }
1591
1592 static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1593 {
1594         struct pci_dev *pdev = box->pci_dev;
1595         struct hw_perf_event *hwc = &event->hw;
1596
1597         pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
1598 }
1599
1600 static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1601 {
1602         struct pci_dev *pdev = box->pci_dev;
1603         struct hw_perf_event *hwc = &event->hw;
1604         u64 count = 0;
1605
1606         pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1607         pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1608
1609         return count;
1610 }
1611
/* IRP callbacks: custom event/counter access due to the irregular
 * register layout; box-level control uses the common PCI handlers. */
static struct intel_uncore_ops ivbep_uncore_irp_ops = {
        .init_box       = ivbep_uncore_pci_init_box,
        .disable_box    = snbep_uncore_pci_disable_box,
        .enable_box     = snbep_uncore_pci_enable_box,
        .disable_event  = ivbep_uncore_irp_disable_event,
        .enable_event   = ivbep_uncore_irp_enable_event,
        .read_counter   = ivbep_uncore_irp_read_counter,
};

/* IvyTown IRP (IIO ring port) PMU: single box. */
static struct intel_uncore_type ivbep_uncore_irp = {
        .name                   = "irp",
        .num_counters           = 4,
        .num_boxes              = 1,
        .perf_ctr_bits          = 48,
        .event_mask             = IVBEP_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
        .ops                    = &ivbep_uncore_irp_ops,
        .format_group           = &ivbep_uncore_format_group,
};
1631
/* QPI callbacks: common PCI handlers plus the SNB-EP QPI match/mask
 * extra-register management (shared-reg constraints). */
static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
        .init_box       = ivbep_uncore_pci_init_box,
        .disable_box    = snbep_uncore_pci_disable_box,
        .enable_box     = snbep_uncore_pci_enable_box,
        .disable_event  = snbep_uncore_pci_disable_event,
        .enable_event   = snbep_qpi_enable_event,
        .read_counter   = snbep_uncore_pci_read_counter,
        .hw_config      = snbep_qpi_hw_config,
        .get_constraint = uncore_get_constraint,
        .put_constraint = uncore_put_constraint,
};

/* IvyTown QPI link-layer PMU: three ports. */
static struct intel_uncore_type ivbep_uncore_qpi = {
        .name                   = "qpi",
        .num_counters           = 4,
        .num_boxes              = 3,
        .perf_ctr_bits          = 48,
        .perf_ctr               = SNBEP_PCI_PMON_CTR0,
        .event_ctl              = SNBEP_PCI_PMON_CTL0,
        .event_mask             = IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
        .num_shared_regs        = 1,    /* match/mask registers */
        .ops                    = &ivbep_uncore_qpi_ops,
        .format_group           = &ivbep_uncore_qpi_format_group,
};
1657
/* IvyTown R2PCIe (ring-to-PCIe) PMU: single box, constrained counters. */
static struct intel_uncore_type ivbep_uncore_r2pcie = {
        .name           = "r2pcie",
        .num_counters   = 4,
        .num_boxes      = 1,
        .perf_ctr_bits  = 44,
        .constraints    = snbep_uncore_r2pcie_constraints,
        IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* IvyTown R3QPI (ring-to-QPI) PMU: two boxes, constrained counters. */
static struct intel_uncore_type ivbep_uncore_r3qpi = {
        .name           = "r3qpi",
        .num_counters   = 3,
        .num_boxes      = 2,
        .perf_ctr_bits  = 44,
        .constraints    = snbep_uncore_r3qpi_constraints,
        IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1675
/* Indices into ivbep_pci_uncores[], matched by the PCI ID table below. */
enum {
        IVBEP_PCI_UNCORE_HA,
        IVBEP_PCI_UNCORE_IMC,
        IVBEP_PCI_UNCORE_IRP,
        IVBEP_PCI_UNCORE_QPI,
        IVBEP_PCI_UNCORE_R2PCIE,
        IVBEP_PCI_UNCORE_R3QPI,
};

/* All PCI-accessed IvyTown uncore PMU types; NULL-terminated. */
static struct intel_uncore_type *ivbep_pci_uncores[] = {
        [IVBEP_PCI_UNCORE_HA]   = &ivbep_uncore_ha,
        [IVBEP_PCI_UNCORE_IMC]  = &ivbep_uncore_imc,
        [IVBEP_PCI_UNCORE_IRP]  = &ivbep_uncore_irp,
        [IVBEP_PCI_UNCORE_QPI]  = &ivbep_uncore_qpi,
        [IVBEP_PCI_UNCORE_R2PCIE]       = &ivbep_uncore_r2pcie,
        [IVBEP_PCI_UNCORE_R3QPI]        = &ivbep_uncore_r3qpi,
        NULL,
};
1694
/*
 * PCI device IDs of the IvyTown uncore PMON units.  driver_data encodes
 * the PMU type (index into ivbep_pci_uncores[]) and the box instance.
 */
static const struct pci_device_id ivbep_uncore_pci_ids[] = {
        { /* Home Agent 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
        },
        { /* Home Agent 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
        },
        { /* MC0 Channel 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
        },
        { /* MC0 Channel 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
        },
        { /* MC0 Channel 3 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
        },
        { /* MC0 Channel 4 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
        },
        { /* MC1 Channel 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
        },
        { /* MC1 Channel 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
        },
        { /* MC1 Channel 3 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
        },
        { /* MC1 Channel 4 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
        },
        { /* IRP */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
        },
        { /* QPI0 Port 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
        },
        { /* QPI0 Port 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
        },
        { /* QPI1 Port 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
        },
        { /* R2PCIe */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
        },
        { /* R3QPI0 Link 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
        },
        { /* R3QPI0 Link 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
        },
        { /* R3QPI1 Link 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
        },
        { /* QPI Port 0 filter  */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
                .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
                                                   SNBEP_PCI_QPI_PORT0_FILTER),
        },
        { /* QPI Port 1 filter  */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
                .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
                                                   SNBEP_PCI_QPI_PORT1_FILTER),
        },
        { /* end: all zeroes */ }
};
1780
/* Probe-less PCI driver; the uncore core walks id_table itself. */
static struct pci_driver ivbep_uncore_pci_driver = {
        .name           = "ivbep_uncore",
        .id_table       = ivbep_uncore_pci_ids,
};
1785
1786 int ivbep_uncore_pci_init(void)
1787 {
1788         int ret = snbep_pci2phy_map_init(0x0e1e);
1789         if (ret)
1790                 return ret;
1791         uncore_pci_uncores = ivbep_pci_uncores;
1792         uncore_pci_driver = &ivbep_uncore_pci_driver;
1793         return 0;
1794 }
1795 /* end of IvyTown uncore support */
1796
1797 /* KNL uncore support */
/* sysfs "format" attributes accepted by the KNL Ubox PMU. */
static struct attribute *knl_uncore_ubox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_tid_en.attr,
        &format_attr_inv.attr,
        &format_attr_thresh5.attr,      /* 5-bit threshold field */
        NULL,
};

static struct attribute_group knl_uncore_ubox_format_group = {
        .name = "format",
        .attrs = knl_uncore_ubox_formats_attr,
};
1812
/* KNL Ubox PMU: reuses the HSW-EP Ubox MSR layout, plus a fixed UCLK
 * cycle counter. */
static struct intel_uncore_type knl_uncore_ubox = {
        .name                   = "ubox",
        .num_counters           = 2,
        .num_boxes              = 1,
        .perf_ctr_bits          = 48,
        .fixed_ctr_bits         = 48,
        .perf_ctr               = HSWEP_U_MSR_PMON_CTR0,
        .event_ctl              = HSWEP_U_MSR_PMON_CTL0,
        .event_mask             = KNL_U_MSR_PMON_RAW_EVENT_MASK,
        .fixed_ctr              = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
        .fixed_ctl              = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
        .ops                    = &snbep_uncore_msr_ops,
        .format_group           = &knl_uncore_ubox_format_group,
};
1827
/* sysfs "format" attributes for the KNL CHA PMU, including its filter
 * fields (config1). */
static struct attribute *knl_uncore_cha_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_qor.attr,
        &format_attr_edge.attr,
        &format_attr_tid_en.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        &format_attr_filter_tid4.attr,
        &format_attr_filter_link3.attr,
        &format_attr_filter_state4.attr,
        &format_attr_filter_local.attr,
        &format_attr_filter_all_op.attr,
        &format_attr_filter_nnm.attr,
        &format_attr_filter_opc3.attr,
        &format_attr_filter_nc.attr,
        &format_attr_filter_isoc.attr,
        NULL,
};

static struct attribute_group knl_uncore_cha_format_group = {
        .name = "format",
        .attrs = knl_uncore_cha_formats_attr,
};
1852
/* Events restricted to counter 0 on the KNL CHA. */
static struct event_constraint knl_uncore_cha_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
        EVENT_CONSTRAINT_END
};

/* CHA events that need filter fields; ->idx encodes the field bitmap
 * consumed by knl_cha_filter_mask(). */
static struct extra_reg knl_uncore_cha_extra_regs[] = {
        SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
                                  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
        SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
        EVENT_EXTRA_END
};
1868
1869 static u64 knl_cha_filter_mask(int fields)
1870 {
1871         u64 mask = 0;
1872
1873         if (fields & 0x1)
1874                 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
1875         if (fields & 0x2)
1876                 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
1877         if (fields & 0x4)
1878                 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
1879         return mask;
1880 }
1881
/*
 * CHA constraint lookup: defer to the common SNB-EP helper with the
 * KNL-specific filter mask function.
 */
static struct event_constraint *
knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
}
1887
1888 static int knl_cha_hw_config(struct intel_uncore_box *box,
1889                              struct perf_event *event)
1890 {
1891         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1892         struct extra_reg *er;
1893         int idx = 0;
1894
1895         for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
1896                 if (er->event != (event->hw.config & er->config_mask))
1897                         continue;
1898                 idx |= er->idx;
1899         }
1900
1901         if (idx) {
1902                 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
1903                             KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
1904                 reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);
1905                 reg1->idx = idx;
1906         }
1907         return 0;
1908 }
1909
/* Defined later in the HSW-EP section; KNL reuses it for the CHA. */
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
                                    struct perf_event *event);

/* KNL CHA callbacks: common SNB-EP MSR ops with KNL filter handling and
 * the HSW-EP filter-aware enable path. */
static struct intel_uncore_ops knl_uncore_cha_ops = {
        .init_box               = snbep_uncore_msr_init_box,
        .disable_box            = snbep_uncore_msr_disable_box,
        .enable_box             = snbep_uncore_msr_enable_box,
        .disable_event          = snbep_uncore_msr_disable_event,
        .enable_event           = hswep_cbox_enable_event,
        .read_counter           = uncore_msr_read_counter,
        .hw_config              = knl_cha_hw_config,
        .get_constraint         = knl_cha_get_constraint,
        .put_constraint         = snbep_cbox_put_constraint,
};
1924
/* KNL CHA (caching/home agent) PMU: up to 38 boxes, one per tile. */
static struct intel_uncore_type knl_uncore_cha = {
        .name                   = "cha",
        .num_counters           = 4,
        .num_boxes              = 38,
        .perf_ctr_bits          = 48,
        .event_ctl              = HSWEP_C0_MSR_PMON_CTL0,
        .perf_ctr               = HSWEP_C0_MSR_PMON_CTR0,
        .event_mask             = KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = HSWEP_C0_MSR_PMON_BOX_CTL,
        .msr_offset             = KNL_CHA_MSR_OFFSET,
        .num_shared_regs        = 1,    /* the shared filter register */
        .constraints            = knl_uncore_cha_constraints,
        .ops                    = &knl_uncore_cha_ops,
        .format_group           = &knl_uncore_cha_format_group,
};
1940
/* sysfs "format" attributes for the KNL PCU PMU (occupancy-style events). */
static struct attribute *knl_uncore_pcu_formats_attr[] = {
        &format_attr_event2.attr,
        &format_attr_use_occ_ctr.attr,
        &format_attr_occ_sel.attr,
        &format_attr_edge.attr,
        &format_attr_tid_en.attr,
        &format_attr_inv.attr,
        &format_attr_thresh6.attr,      /* 6-bit threshold field */
        &format_attr_occ_invert.attr,
        &format_attr_occ_edge_det.attr,
        NULL,
};

static struct attribute_group knl_uncore_pcu_format_group = {
        .name = "format",
        .attrs = knl_uncore_pcu_formats_attr,
};
1958
/* KNL PCU PMU: single box, HSW-EP register layout with a KNL event mask. */
static struct intel_uncore_type knl_uncore_pcu = {
        .name                   = "pcu",
        .num_counters           = 4,
        .num_boxes              = 1,
        .perf_ctr_bits          = 48,
        .perf_ctr               = HSWEP_PCU_MSR_PMON_CTR0,
        .event_ctl              = HSWEP_PCU_MSR_PMON_CTL0,
        .event_mask             = KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = HSWEP_PCU_MSR_PMON_BOX_CTL,
        .ops                    = &snbep_uncore_msr_ops,
        .format_group           = &knl_uncore_pcu_format_group,
};
1971
/* All MSR-accessed KNL uncore PMU types; NULL-terminated. */
static struct intel_uncore_type *knl_msr_uncores[] = {
        &knl_uncore_ubox,
        &knl_uncore_cha,
        &knl_uncore_pcu,
        NULL,
};

/* Register the KNL MSR uncore PMUs. */
void knl_uncore_cpu_init(void)
{
        uncore_msr_uncores = knl_msr_uncores;
}
1983
/*
 * Enable a KNL IMC/EDC box by writing 0 to its box control register.
 * NOTE(review): unlike the SNB-EP path this clears the register outright,
 * presumably deasserting the freeze bit — confirm against the KNL uncore
 * documentation.
 */
static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
{
        struct pci_dev *pdev = box->pci_dev;
        int box_ctl = uncore_pci_box_ctl(box);

        pci_write_config_dword(pdev, box_ctl, 0);
}
1991
1992 static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
1993                                         struct perf_event *event)
1994 {
1995         struct pci_dev *pdev = box->pci_dev;
1996         struct hw_perf_event *hwc = &event->hw;
1997
1998         if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
1999                                                         == UNCORE_FIXED_EVENT)
2000                 pci_write_config_dword(pdev, hwc->config_base,
2001                                        hwc->config | KNL_PMON_FIXED_CTL_EN);
2002         else
2003                 pci_write_config_dword(pdev, hwc->config_base,
2004                                        hwc->config | SNBEP_PMON_CTL_EN);
2005 }
2006
/* Shared callbacks for all KNL IMC/EDC clock-domain PMUs. */
static struct intel_uncore_ops knl_uncore_imc_ops = {
        .init_box       = snbep_uncore_pci_init_box,
        .disable_box    = snbep_uncore_pci_disable_box,
        .enable_box     = knl_uncore_imc_enable_box,
        .read_counter   = snbep_uncore_pci_read_counter,
        .enable_event   = knl_uncore_imc_enable_event,
        .disable_event  = snbep_uncore_pci_disable_event,
};
2015
/* KNL MC UClk PMU: one box per memory controller (2), fixed UCLK counter. */
static struct intel_uncore_type knl_uncore_imc_uclk = {
        .name                   = "imc_uclk",
        .num_counters           = 4,
        .num_boxes              = 2,
        .perf_ctr_bits          = 48,
        .fixed_ctr_bits         = 48,
        .perf_ctr               = KNL_UCLK_MSR_PMON_CTR0_LOW,
        .event_ctl              = KNL_UCLK_MSR_PMON_CTL0,
        .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,
        .fixed_ctr              = KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
        .fixed_ctl              = KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
        .box_ctl                = KNL_UCLK_MSR_PMON_BOX_CTL,
        .ops                    = &knl_uncore_imc_ops,
        .format_group           = &snbep_uncore_format_group,
};

/* KNL MC DClk PMU: one box per DDR channel (2 MCs x 3 channels). */
static struct intel_uncore_type knl_uncore_imc_dclk = {
        .name                   = "imc",
        .num_counters           = 4,
        .num_boxes              = 6,
        .perf_ctr_bits          = 48,
        .fixed_ctr_bits         = 48,
        .perf_ctr               = KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
        .event_ctl              = KNL_MC0_CH0_MSR_PMON_CTL0,
        .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,
        .fixed_ctr              = KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
        .fixed_ctl              = KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
        .box_ctl                = KNL_MC0_CH0_MSR_PMON_BOX_CTL,
        .ops                    = &knl_uncore_imc_ops,
        .format_group           = &snbep_uncore_format_group,
};

/* KNL EDC UClk PMU: one box per MCDRAM EDC controller (8). */
static struct intel_uncore_type knl_uncore_edc_uclk = {
        .name                   = "edc_uclk",
        .num_counters           = 4,
        .num_boxes              = 8,
        .perf_ctr_bits          = 48,
        .fixed_ctr_bits         = 48,
        .perf_ctr               = KNL_UCLK_MSR_PMON_CTR0_LOW,
        .event_ctl              = KNL_UCLK_MSR_PMON_CTL0,
        .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,
        .fixed_ctr              = KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
        .fixed_ctl              = KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
        .box_ctl                = KNL_UCLK_MSR_PMON_BOX_CTL,
        .ops                    = &knl_uncore_imc_ops,
        .format_group           = &snbep_uncore_format_group,
};

/* KNL EDC EClk PMU: one box per MCDRAM EDC controller (8). */
static struct intel_uncore_type knl_uncore_edc_eclk = {
        .name                   = "edc_eclk",
        .num_counters           = 4,
        .num_boxes              = 8,
        .perf_ctr_bits          = 48,
        .fixed_ctr_bits         = 48,
        .perf_ctr               = KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
        .event_ctl              = KNL_EDC0_ECLK_MSR_PMON_CTL0,
        .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,
        .fixed_ctr              = KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
        .fixed_ctl              = KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
        .box_ctl                = KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
        .ops                    = &knl_uncore_imc_ops,
        .format_group           = &snbep_uncore_format_group,
};
2079
/* Event 0x23 is restricted to counters 0-1 on the KNL M2PCIe. */
static struct event_constraint knl_uncore_m2pcie_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
        EVENT_CONSTRAINT_END
};

/* KNL M2PCIe PMU: single box, standard SNB-EP PCI register layout. */
static struct intel_uncore_type knl_uncore_m2pcie = {
        .name           = "m2pcie",
        .num_counters   = 4,
        .num_boxes      = 1,
        .perf_ctr_bits  = 48,
        .constraints    = knl_uncore_m2pcie_constraints,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2093
/* sysfs "format" attributes accepted by the KNL IRP PMU. */
static struct attribute *knl_uncore_irp_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_qor.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        NULL,
};

static struct attribute_group knl_uncore_irp_format_group = {
        .name = "format",
        .attrs = knl_uncore_irp_formats_attr,
};
2108
/* KNL IRP PMU: single box, regular register layout (unlike IVT's IRP). */
static struct intel_uncore_type knl_uncore_irp = {
        .name                   = "irp",
        .num_counters           = 2,
        .num_boxes              = 1,
        .perf_ctr_bits          = 48,
        .perf_ctr               = SNBEP_PCI_PMON_CTR0,
        .event_ctl              = SNBEP_PCI_PMON_CTL0,
        .event_mask             = KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
        .box_ctl                = KNL_IRP_PCI_PMON_BOX_CTL,
        .ops                    = &snbep_uncore_pci_ops,
        .format_group           = &knl_uncore_irp_format_group,
};
2121
/* Indices into knl_pci_uncores[], matched by the PCI ID table below. */
enum {
        KNL_PCI_UNCORE_MC_UCLK,
        KNL_PCI_UNCORE_MC_DCLK,
        KNL_PCI_UNCORE_EDC_UCLK,
        KNL_PCI_UNCORE_EDC_ECLK,
        KNL_PCI_UNCORE_M2PCIE,
        KNL_PCI_UNCORE_IRP,
};

/* All PCI-accessed KNL uncore PMU types; NULL-terminated. */
static struct intel_uncore_type *knl_pci_uncores[] = {
        [KNL_PCI_UNCORE_MC_UCLK]        = &knl_uncore_imc_uclk,
        [KNL_PCI_UNCORE_MC_DCLK]        = &knl_uncore_imc_dclk,
        [KNL_PCI_UNCORE_EDC_UCLK]       = &knl_uncore_edc_uclk,
        [KNL_PCI_UNCORE_EDC_ECLK]       = &knl_uncore_edc_eclk,
        [KNL_PCI_UNCORE_M2PCIE]         = &knl_uncore_m2pcie,
        [KNL_PCI_UNCORE_IRP]            = &knl_uncore_irp,
        NULL,
};
2140
2141 /*
2142  * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
2143  * device type. prior to KNL, each instance of a PMU device type had a unique
2144  * device ID.
2145  *
2146  *      PCI Device ID   Uncore PMU Devices
2147  *      ----------------------------------
2148  *      0x7841          MC0 UClk, MC1 UClk
2149  *      0x7843          MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
2150  *                      MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
2151  *      0x7833          EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
2152  *                      EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
2153  *      0x7835          EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
2154  *                      EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
2155  *      0x7817          M2PCIe
2156  *      0x7814          IRP
2157 */
2158
/* PCI device IDs of the KNL uncore PMON units; box index 0 here because
 * multiple instances share one device ID (see comment above). */
static const struct pci_device_id knl_uncore_pci_ids[] = {
        { /* MC UClk */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
                .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_MC_UCLK, 0),
        },
        { /* MC DClk Channel */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
                .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_MC_DCLK, 0),
        },
        { /* EDC UClk */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
                .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_EDC_UCLK, 0),
        },
        { /* EDC EClk */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
                .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_EDC_ECLK, 0),
        },
        { /* M2PCIe */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
                .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
        },
        { /* IRP */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
                .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
        },
        { /* end: all zeroes */ }
};

/* Probe-less PCI driver; the uncore core walks id_table itself. */
static struct pci_driver knl_uncore_pci_driver = {
        .name           = "knl_uncore",
        .id_table       = knl_uncore_pci_ids,
};
2191
2192 int knl_uncore_pci_init(void)
2193 {
2194         int ret;
2195
2196         /* All KNL PCI based PMON units are on the same PCI bus except IRP */
2197         ret = snb_pci2phy_map_init(0x7814); /* IRP */
2198         if (ret)
2199                 return ret;
2200         ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
2201         if (ret)
2202                 return ret;
2203         uncore_pci_uncores = knl_pci_uncores;
2204         uncore_pci_driver = &knl_uncore_pci_driver;
2205         return 0;
2206 }
2207
2208 /* end of KNL uncore support */
2209
2210 /* Haswell-EP uncore support */
/* sysfs "format" attributes describing the HSW-EP UBOX event encoding. */
static struct attribute *hswep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,	/* UBOX threshold is only 5 bits wide */
	&format_attr_filter_tid2.attr,
	&format_attr_filter_cid.attr,
	NULL,
};
2221
/* "format" attribute group exposed in sysfs for the HSW-EP UBOX PMU. */
static struct attribute_group hswep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_ubox_formats_attr,
};
2226
2227 static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2228 {
2229         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2230         reg1->reg = HSWEP_U_MSR_PMON_FILTER;
2231         reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
2232         reg1->idx = 0;
2233         return 0;
2234 }
2235
/*
 * UBOX ops: common SNB-EP MSR accessors plus a hw_config hook that
 * stages the UBOX filter; the filter register is managed as a
 * shared register via the generic constraint helpers.
 */
static struct intel_uncore_ops hswep_uncore_ubox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_ubox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};
2242
/*
 * HSW-EP UBOX PMU: single box with two 44-bit general counters plus
 * a 48-bit fixed (UCLK) counter, and one shared filter register.
 */
static struct intel_uncore_type hswep_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 44,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_ubox_ops,
	.format_group		= &hswep_uncore_ubox_format_group,
};
2258
/*
 * sysfs "format" attributes for the HSW-EP CBox PMU, including the
 * filter fields consumed by hswep_cbox_filter_mask().
 */
static struct attribute *hswep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid3.attr,
	&format_attr_filter_link2.attr,
	&format_attr_filter_state3.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};
2275
/* "format" attribute group exposed in sysfs for the HSW-EP CBox PMU. */
static struct attribute_group hswep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_cbox_formats_attr,
};
2280
/*
 * CBox events restricted to specific counters: each entry maps an
 * event code to the bitmask of counters the event may run on.
 */
static struct event_constraint hswep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
2291
/*
 * Per-event filter-field requirements for the CBox.  Each entry
 * matches (event code, config mask); the last field is a selector
 * bitmap that hswep_cbox_hw_config() ORs together and feeds to
 * hswep_cbox_filter_mask() to compute the valid filter bits
 * (0x1 TID, 0x2 LINK, 0x4 STATE, 0x8 NID, 0x10 OPC group).
 */
static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	EVENT_EXTRA_END
};
2333
2334 static u64 hswep_cbox_filter_mask(int fields)
2335 {
2336         u64 mask = 0;
2337         if (fields & 0x1)
2338                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2339         if (fields & 0x2)
2340                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2341         if (fields & 0x4)
2342                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2343         if (fields & 0x8)
2344                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2345         if (fields & 0x10) {
2346                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2347                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2348                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2349                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
2350         }
2351         return mask;
2352 }
2353
/*
 * Delegate to the common SNB-EP CBox constraint logic, supplying
 * the HSW-EP filter-field layout.
 */
static struct event_constraint *
hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
}
2359
2360 static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2361 {
2362         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2363         struct extra_reg *er;
2364         int idx = 0;
2365
2366         for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
2367                 if (er->event != (event->hw.config & er->config_mask))
2368                         continue;
2369                 idx |= er->idx;
2370         }
2371
2372         if (idx) {
2373                 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2374                             HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
2375                 reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
2376                 reg1->idx = idx;
2377         }
2378         return 0;
2379 }
2380
2381 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2382                                   struct perf_event *event)
2383 {
2384         struct hw_perf_event *hwc = &event->hw;
2385         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2386
2387         if (reg1->idx != EXTRA_REG_NONE) {
2388                 u64 filter = uncore_shared_reg_config(box, 0);
2389                 wrmsrl(reg1->reg, filter & 0xffffffff);
2390                 wrmsrl(reg1->reg + 1, filter >> 32);
2391         }
2392
2393         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
2394 }
2395
/*
 * CBox ops: common SNB-EP box/event accessors with HSW-EP specific
 * enable (writes the split filter pair first), hw_config and filter
 * constraint handling.
 */
static struct intel_uncore_ops hswep_uncore_cbox_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= hswep_cbox_hw_config,
	.get_constraint		= hswep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
2407
/*
 * HSW-EP CBox PMU: 4 counters per box, up to 18 boxes (clamped to
 * the real core count in hswep_uncore_cpu_init()), one shared
 * filter register per box.
 */
static struct intel_uncore_type hswep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 18,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= hswep_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
2423
2424 /*
2425  * Write SBOX Initialization register bit by bit to avoid spurious #GPs
2426  */
2427 static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
2428 {
2429         unsigned msr = uncore_msr_box_ctl(box);
2430
2431         if (msr) {
2432                 u64 init = SNBEP_PMON_BOX_CTL_INT;
2433                 u64 flags = 0;
2434                 int i;
2435
2436                 for_each_set_bit(i, (unsigned long *)&init, 64) {
2437                         flags |= (1ULL << i);
2438                         wrmsrl(msr, flags);
2439                 }
2440         }
2441 }
2442
/*
 * SBOX ops: common SNB-EP MSR accessors, with init_box overridden
 * to use the bit-by-bit write workaround above.
 */
static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.init_box		= hswep_uncore_sbox_msr_init_box
};
2447
/* sysfs "format" attributes describing the HSW-EP SBOX event encoding. */
static struct attribute *hswep_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
2457
/* "format" attribute group exposed in sysfs for the HSW-EP SBOX PMU. */
static struct attribute_group hswep_uncore_sbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_sbox_formats_attr,
};
2462
/*
 * HSW-EP SBOX PMU: 4 counters x up to 4 boxes (reduced to 2 on 6-8
 * core parts by hswep_uncore_cpu_init()), 44-bit counters.
 */
static struct intel_uncore_type hswep_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 44,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};
2476
/*
 * Stage the PCU filter for events that take one: event selects
 * 0xb..0xe each map to a filter slot (idx = ev_sel - 0xb) in
 * HSWEP_PCU_MSR_PMON_BOX_FILTER, with the value taken from config1.
 * Always succeeds.
 *
 * NOTE(review): the config1 mask here is (0xff << reg1->idx); the
 * SNB-EP variant earlier in this file masks with
 * (0xff << (reg1->idx * 8)) to select a byte-wide band field --
 * confirm the unscaled shift is intentional for HSW-EP.
 */
static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		reg1->config = event->attr.config1 & (0xff << reg1->idx);
	}
	return 0;
}
2490
/*
 * PCU ops: common SNB-EP MSR accessors plus the HSW-EP filter
 * hw_config and the shared SNB-EP PCU constraint handling.
 */
static struct intel_uncore_ops hswep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
2497
/*
 * HSW-EP PCU PMU: single box, 4 x 48-bit counters, one shared
 * filter register.  Reuses the SNB-EP PCU format group.
 */
static struct intel_uncore_type hswep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};
2511
/* NULL-terminated list of the HSW-EP MSR-based uncore PMU types. */
static struct intel_uncore_type *hswep_msr_uncores[] = {
	&hswep_uncore_ubox,
	&hswep_uncore_cbox,
	&hswep_uncore_sbox,
	&hswep_uncore_pcu,
	NULL,
};
2519
/*
 * Wire up the Haswell-EP MSR-based uncore types.
 *
 * The CBox count is clamped to the actual core count, and parts with
 * 6-8 cores (which have only two SBOXes) are detected by reading
 * bits 7:6 of the CAPID4 register (PCI config offset 0x94) from the
 * PCU.3 device captured in uncore_extra_pci_dev[].
 *
 * NOTE(review): topology_phys_to_logical_pkg(0) assumes physical
 * package 0 exists and has been enumerated -- confirm behaviour on
 * systems where socket 0 is absent or offline.
 */
void hswep_uncore_cpu_init(void)
{
	int pkg = topology_phys_to_logical_pkg(0);

	if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;

	/* Detect 6-8 core systems with only two SBOXes */
	if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
		u32 capid4;

		pci_read_config_dword(uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3],
				      0x94, &capid4);
		if (((capid4 >> 6) & 0x3) == 0)
			hswep_uncore_sbox.num_boxes = 2;
	}

	uncore_msr_uncores = hswep_msr_uncores;
}
2539
/* HSW-EP Home Agent PMU: 5 counters x 2 boxes, standard SNB-EP PCI layout. */
static struct intel_uncore_type hswep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 5,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2547
/*
 * Named IMC events exported via sysfs.  The CAS-count scale
 * 6.103515625e-5 equals 64/2^20, i.e. one 64-byte transfer per CAS
 * reported in MiB.
 */
static struct uncore_event_desc hswep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
2558
/*
 * HSW-EP IMC PMU: 5 counters x 8 channel boxes plus a 48-bit fixed
 * (DCLK) counter, standard SNB-EP PCI PMON layout.
 */
static struct intel_uncore_type hswep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 5,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2570
2571 static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
2572
2573 static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
2574 {
2575         struct pci_dev *pdev = box->pci_dev;
2576         struct hw_perf_event *hwc = &event->hw;
2577         u64 count = 0;
2578
2579         pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
2580         pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
2581
2582         return count;
2583 }
2584
/*
 * IRP ops: SNB-EP PCI box handling with IVB-EP style event
 * enable/disable and the split 32-bit counter read above.
 */
static struct intel_uncore_ops hswep_uncore_irp_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= hswep_uncore_irp_read_counter,
};
2593
/*
 * HSW-EP IRP PMU: single box, 4 x 48-bit counters accessed through
 * PCI config space (no perf_ctr/event_ctl bases; see the ops above).
 */
static struct intel_uncore_type hswep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};
2604
/*
 * HSW-EP QPI PMU: 5 counters x 3 port boxes, reusing the SNB-EP QPI
 * ops and format group, with one shared (match/mask) register set.
 */
static struct intel_uncore_type hswep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 5,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
2618
/*
 * R2PCIe events restricted to specific counters: event code ->
 * bitmask of allowed counters.
 */
static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	EVENT_CONSTRAINT_END
};
2640
/* HSW-EP R2PCIe PMU: single box, 4 x 48-bit counters, constrained events. */
static struct intel_uncore_type hswep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= hswep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2649
/*
 * R3QPI events restricted to specific counters: event code ->
 * bitmask of allowed counters.
 */
static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
2686
/* HSW-EP R3QPI PMU: 4 counters x 3 link boxes, 44-bit counters. */
static struct intel_uncore_type hswep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 44,
	.constraints	= hswep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2695
/* Indices into hswep_pci_uncores[]; order must match that array. */
enum {
	HSWEP_PCI_UNCORE_HA,
	HSWEP_PCI_UNCORE_IMC,
	HSWEP_PCI_UNCORE_IRP,
	HSWEP_PCI_UNCORE_QPI,
	HSWEP_PCI_UNCORE_R2PCIE,
	HSWEP_PCI_UNCORE_R3QPI,
};
2704
/* NULL-terminated list of HSW-EP PCI uncore types, indexed by the enum above. */
static struct intel_uncore_type *hswep_pci_uncores[] = {
	[HSWEP_PCI_UNCORE_HA]	= &hswep_uncore_ha,
	[HSWEP_PCI_UNCORE_IMC]	= &hswep_uncore_imc,
	[HSWEP_PCI_UNCORE_IRP]	= &hswep_uncore_irp,
	[HSWEP_PCI_UNCORE_QPI]	= &hswep_uncore_qpi,
	[HSWEP_PCI_UNCORE_R2PCIE]	= &hswep_uncore_r2pcie,
	[HSWEP_PCI_UNCORE_R3QPI]	= &hswep_uncore_r3qpi,
	NULL,
};
2714
/*
 * PCI device IDs of the HSW-EP uncore PMON units.  driver_data packs
 * the index into hswep_pci_uncores with the box instance number;
 * the UNCORE_EXTRA_PCI_DEV entries at the end are non-PMON devices
 * (QPI port filters, PCU.3 capability registers) captured for use by
 * other code such as hswep_uncore_cpu_init().
 */
static const struct pci_device_id hswep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* PCU.3 (for Capability registers) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   HSWEP_PCI_PCU_3),
	},
	{ /* end: all zeroes */ }
};
2805
/*
 * HSW-EP uncore PCI driver stub: only name and id_table are set here;
 * presumably the common uncore PCI code fills in probe/remove before
 * registration -- see uncore.c (TODO confirm).
 */
static struct pci_driver hswep_uncore_pci_driver = {
	.name		= "hswep_uncore",
	.id_table	= hswep_uncore_pci_ids,
};
2810
2811 int hswep_uncore_pci_init(void)
2812 {
2813         int ret = snbep_pci2phy_map_init(0x2f1e);
2814         if (ret)
2815                 return ret;
2816         uncore_pci_uncores = hswep_pci_uncores;
2817         uncore_pci_driver = &hswep_uncore_pci_driver;
2818         return 0;
2819 }
2820 /* end of Haswell-EP uncore support */
2821
2822 /* BDX uncore support */
2823
2824 static struct intel_uncore_type bdx_uncore_ubox = {
2825         .name                   = "ubox",
2826         .num_counters           = 2,
2827         .num_boxes              = 1,
2828         .perf_ctr_bits          = 48,
2829         .fixed_ctr_bits         = 48,
2830         .perf_ctr               = HSWEP_U_MSR_PMON_CTR0,
2831         .event_ctl              = HSWEP_U_MSR_PMON_CTL0,
2832         .event_mask             = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
2833         .fixed_ctr              = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
2834         .fixed_ctl              = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
2835         .num_shared_regs        = 1,
2836         .ops                    = &ivbep_uncore_msr_ops,
2837         .format_group           = &ivbep_uncore_ubox_format_group,
2838 };
2839
/*
 * BDX CBox events restricted to specific counters: event code ->
 * bitmask of allowed counters.
 */
static struct event_constraint bdx_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
2847
/*
 * BDX CBox PMU: HSW-EP register layout and ops, up to 24 boxes
 * (clamped to the real core count in bdx_uncore_cpu_init()).
 */
static struct intel_uncore_type bdx_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 24,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= bdx_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
2863
/*
 * BDX SBOX PMU: HSW-EP layout with 48-bit counters; removed entirely
 * on BDX-DE by bdx_uncore_cpu_init().
 */
static struct intel_uncore_type bdx_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};
2877
/*
 * Index of bdx_uncore_sbox within bdx_msr_uncores[] below; must be
 * kept in sync with the initializer (bdx_uncore_cpu_init() NULLs
 * this entry on SBOX-less parts).
 */
#define BDX_MSR_UNCORE_SBOX	3

/* NULL-terminated list of the BDX MSR-based uncore PMU types. */
static struct intel_uncore_type *bdx_msr_uncores[] = {
	&bdx_uncore_ubox,
	&bdx_uncore_cbox,
	&hswep_uncore_pcu,
	&bdx_uncore_sbox,
	NULL,
};
2887
/*
 * Wire up the Broadwell-EP/-DE MSR-based uncore types.  The CBox
 * count is clamped to the actual core count.  On model 86 (per the
 * comment below, BDX-DE, which lacks an SBOX) the SBOX entry is
 * NULLed out -- since it is the last real entry, this truncates the
 * list rather than leaving a hole.
 */
void bdx_uncore_cpu_init(void)
{
	if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = bdx_msr_uncores;

	/* BDX-DE doesn't have SBOX */
	if (boot_cpu_data.x86_model == 86)
		uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
}
2898
/* BDX Home Agent PMU: 4 counters x 2 boxes, standard SNB-EP PCI layout. */
static struct intel_uncore_type bdx_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2906
/*
 * BDX IMC PMU: 5 counters x 8 channel boxes plus a fixed (DCLK)
 * counter; reuses the HSW-EP IMC event descriptions.
 */
static struct intel_uncore_type bdx_uncore_imc = {
	.name		= "imc",
	.num_counters   = 5,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2918
/*
 * BDX IRP PMU: single box, 4 x 48-bit counters, reusing the HSW-EP
 * IRP ops (counters read via PCI config space).
 */
static struct intel_uncore_type bdx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};
2929
/*
 * BDX QPI link PCI uncore PMU: three port boxes, four 48-bit counters each.
 * Has one shared register per box (used by the SNB-EP QPI ops for the
 * extra match/mask filter registers) and its own raw event mask and
 * format group to expose those filters.
 */
static struct intel_uncore_type bdx_uncore_qpi = {
        .name                   = "qpi",
        .num_counters           = 4,
        .num_boxes              = 3,
        .perf_ctr_bits          = 48,
        .perf_ctr               = SNBEP_PCI_PMON_CTR0,
        .event_ctl              = SNBEP_PCI_PMON_CTL0,
        .event_mask             = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
        .num_shared_regs        = 1,
        .ops                    = &snbep_uncore_qpi_ops,
        .format_group           = &snbep_uncore_qpi_format_group,
};
2943
/*
 * R2PCIe counter constraints: each entry maps an event code to the bitmask
 * of general counters it may be scheduled on (0x3 = counters 0-1,
 * 0x1 = counter 0 only). Values are dictated by the hardware; see the
 * Broadwell uncore performance monitoring reference manual.
 */
static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
        EVENT_CONSTRAINT_END
};
2956
/*
 * BDX R2PCIe ring-to-PCIe agent PCI uncore PMU: one box, four 48-bit
 * counters, with per-event counter constraints (see table above).
 */
static struct intel_uncore_type bdx_uncore_r2pcie = {
        .name           = "r2pcie",
        .num_counters   = 4,
        .num_boxes      = 1,
        .perf_ctr_bits  = 48,
        .constraints    = bdx_uncore_r2pcie_constraints,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2965
/*
 * R3QPI counter constraints: event code -> allowed-counter bitmask
 * (0x7 = counters 0-2, 0x3 = counters 0-1, 0x1 = counter 0 only).
 * Values are dictated by the hardware; see the Broadwell uncore
 * performance monitoring reference manual.
 */
static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
        EVENT_CONSTRAINT_END
};
2999
/*
 * BDX R3QPI ring-to-QPI agent PCI uncore PMU: three boxes (one per QPI
 * link), three 48-bit counters each, with per-event counter constraints.
 */
static struct intel_uncore_type bdx_uncore_r3qpi = {
        .name           = "r3qpi",
        .num_counters   = 3,
        .num_boxes      = 3,
        .perf_ctr_bits  = 48,
        .constraints    = bdx_uncore_r3qpi_constraints,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3008
/* Indexes into bdx_pci_uncores[]; also encoded into PCI ID driver_data. */
enum {
        BDX_PCI_UNCORE_HA,
        BDX_PCI_UNCORE_IMC,
        BDX_PCI_UNCORE_IRP,
        BDX_PCI_UNCORE_QPI,
        BDX_PCI_UNCORE_R2PCIE,
        BDX_PCI_UNCORE_R3QPI,
};
3017
/*
 * PCI-based uncore PMU types for Broadwell-EP/DE, indexed by the
 * BDX_PCI_UNCORE_* enum and NULL-terminated.
 */
static struct intel_uncore_type *bdx_pci_uncores[] = {
        [BDX_PCI_UNCORE_HA]     = &bdx_uncore_ha,
        [BDX_PCI_UNCORE_IMC]    = &bdx_uncore_imc,
        [BDX_PCI_UNCORE_IRP]    = &bdx_uncore_irp,
        [BDX_PCI_UNCORE_QPI]    = &bdx_uncore_qpi,
        [BDX_PCI_UNCORE_R2PCIE] = &bdx_uncore_r2pcie,
        [BDX_PCI_UNCORE_R3QPI]  = &bdx_uncore_r3qpi,
        NULL,
};
3027
/*
 * PCI device IDs of the Broadwell server uncore PMON devices.
 * driver_data packs (uncore type index, box index) via UNCORE_PCI_DEV_DATA;
 * the UNCORE_EXTRA_PCI_DEV entries are the QPI port filter devices, which
 * carry filter registers rather than their own PMU.
 */
static const struct pci_device_id bdx_uncore_pci_ids[] = {
        { /* Home Agent 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
        },
        { /* Home Agent 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
        },
        { /* MC0 Channel 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
        },
        { /* MC0 Channel 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
        },
        { /* MC0 Channel 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
        },
        { /* MC0 Channel 3 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
        },
        { /* MC1 Channel 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
        },
        { /* MC1 Channel 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
        },
        { /* MC1 Channel 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
        },
        { /* MC1 Channel 3 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
        },
        { /* IRP */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
        },
        { /* QPI0 Port 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
        },
        { /* QPI0 Port 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
        },
        { /* QPI1 Port 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
        },
        { /* R2PCIe */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
        },
        { /* R3QPI0 Link 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
        },
        { /* R3QPI0 Link 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
        },
        { /* R3QPI1 Link 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
        },
        { /* QPI Port 0 filter  */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
                .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 0),
        },
        { /* QPI Port 1 filter  */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
                .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 1),
        },
        { /* QPI Port 2 filter  */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
                .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2),
        },
        { /* end: all zeroes */ }
};
3115
/*
 * PCI driver descriptor for the BDX uncore devices. No probe/remove here;
 * the uncore core code installs its own handlers via uncore_pci_driver.
 */
static struct pci_driver bdx_uncore_pci_driver = {
        .name           = "bdx_uncore",
        .id_table       = bdx_uncore_pci_ids,
};
3120
3121 int bdx_uncore_pci_init(void)
3122 {
3123         int ret = snbep_pci2phy_map_init(0x6f1e);
3124
3125         if (ret)
3126                 return ret;
3127         uncore_pci_uncores = bdx_pci_uncores;
3128         uncore_pci_driver = &bdx_uncore_pci_driver;
3129         return 0;
3130 }
3131
3132 /* end of BDX uncore support */