// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2016-2020 Arm Limited
// CMN-600 Coherent Mesh Network PMU driver

#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sort.h>

/* Common register stuff */
#define CMN_NODE_INFO                   0x0000
#define CMN_NI_NODE_TYPE                GENMASK_ULL(15, 0)
#define CMN_NI_NODE_ID                  GENMASK_ULL(31, 16)
#define CMN_NI_LOGICAL_ID               GENMASK_ULL(47, 32)

#define CMN_NODEID_DEVID(reg)           ((reg) & 3)
#define CMN_NODEID_EXT_DEVID(reg)       ((reg) & 1)
#define CMN_NODEID_PID(reg)             (((reg) >> 2) & 1)
#define CMN_NODEID_EXT_PID(reg)         (((reg) >> 1) & 3)
#define CMN_NODEID_1x1_PID(reg)         (((reg) >> 2) & 7)
#define CMN_NODEID_X(reg, bits)         ((reg) >> (3 + (bits)))
#define CMN_NODEID_Y(reg, bits)         (((reg) >> 3) & ((1U << (bits)) - 1))

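/*
 * Putting the ID macros together: on a mesh with a dimension greater than 4
 * (i.e. 3 ID bits per coordinate), a non-extended node ID decodes as X[8:6],
 * Y[5:3], PID[2] and DEVID[1:0]. As an illustrative example, ID 0x16b would
 * decode to x = 5, y = 5, port = 0, device = 3.
 */
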
#define CMN_CHILD_INFO                  0x0080
#define CMN_CI_CHILD_COUNT              GENMASK_ULL(15, 0)
#define CMN_CI_CHILD_PTR_OFFSET         GENMASK_ULL(31, 16)

#define CMN_CHILD_NODE_ADDR             GENMASK(27, 0)
#define CMN_CHILD_NODE_EXTERNAL         BIT(31)

#define CMN_MAX_DIMENSION               8
#define CMN_MAX_XPS                     (CMN_MAX_DIMENSION * CMN_MAX_DIMENSION)
#define CMN_MAX_DTMS                    (CMN_MAX_XPS + (CMN_MAX_DIMENSION - 1) * 4)

/* The CFG node has various info besides the discovery tree */
#define CMN_CFGM_PERIPH_ID_2            0x0010
#define CMN_CFGM_PID2_REVISION          GENMASK(7, 4)

#define CMN_CFGM_INFO_GLOBAL            0x900
#define CMN_INFO_MULTIPLE_DTM_EN        BIT_ULL(63)
#define CMN_INFO_RSP_VC_NUM             GENMASK_ULL(53, 52)
#define CMN_INFO_DAT_VC_NUM             GENMASK_ULL(51, 50)

/* XPs also have some local topology info which has uses too */
#define CMN_MXP__CONNECT_INFO_P0        0x0008
#define CMN_MXP__CONNECT_INFO_P1        0x0010
#define CMN_MXP__CONNECT_INFO_P2        0x0028
#define CMN_MXP__CONNECT_INFO_P3        0x0030
#define CMN_MXP__CONNECT_INFO_P4        0x0038
#define CMN_MXP__CONNECT_INFO_P5        0x0040

/* PMU registers occupy the 3rd 4KB page of each node's region */
#define CMN_PMU_OFFSET                  0x2000

/* For most nodes, this is all there is */
#define CMN_PMU_EVENT_SEL               0x000
#define CMN_PMU_EVENTn_ID_SHIFT(n)      ((n) * 8)

/* DTMs live in the PMU space of XP registers */
#define CMN_DTM_WPn(n)                  (0x1A0 + (n) * 0x18)
#define CMN_DTM_WPn_CONFIG(n)           (CMN_DTM_WPn(n) + 0x00)
#define CMN_DTM_WPn_CONFIG_WP_DEV_SEL2  GENMASK_ULL(18, 17)
#define CMN_DTM_WPn_CONFIG_WP_COMBINE   BIT(9)
#define CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE BIT(8)
#define CMN600_WPn_CONFIG_WP_COMBINE    BIT(6)
#define CMN600_WPn_CONFIG_WP_EXCLUSIVE  BIT(5)
#define CMN_DTM_WPn_CONFIG_WP_GRP       GENMASK_ULL(5, 4)
#define CMN_DTM_WPn_CONFIG_WP_CHN_SEL   GENMASK_ULL(3, 1)
#define CMN_DTM_WPn_CONFIG_WP_DEV_SEL   BIT(0)
#define CMN_DTM_WPn_VAL(n)              (CMN_DTM_WPn(n) + 0x08)
#define CMN_DTM_WPn_MASK(n)             (CMN_DTM_WPn(n) + 0x10)

#define CMN_DTM_PMU_CONFIG              0x210
#define CMN__PMEVCNT0_INPUT_SEL         GENMASK_ULL(37, 32)
#define CMN__PMEVCNT0_INPUT_SEL_WP      0x00
#define CMN__PMEVCNT0_INPUT_SEL_XP      0x04
#define CMN__PMEVCNT0_INPUT_SEL_DEV     0x10
#define CMN__PMEVCNT0_GLOBAL_NUM        GENMASK_ULL(18, 16)
#define CMN__PMEVCNTn_GLOBAL_NUM_SHIFT(n)       ((n) * 4)
#define CMN__PMEVCNT_PAIRED(n)          BIT(4 + (n))
#define CMN__PMEVCNT23_COMBINED         BIT(2)
#define CMN__PMEVCNT01_COMBINED         BIT(1)
#define CMN_DTM_PMU_CONFIG_PMU_EN       BIT(0)

#define CMN_DTM_PMEVCNT                 0x220

#define CMN_DTM_PMEVCNTSR               0x240

#define CMN_DTM_UNIT_INFO               0x0910

#define CMN_DTM_NUM_COUNTERS            4
/* Want more local counters? Why not replicate the whole DTM! Ugh... */
#define CMN_DTM_OFFSET(n)               ((n) * 0x200)

/* The DTC node is where the magic happens */
#define CMN_DT_DTC_CTL                  0x0a00
#define CMN_DT_DTC_CTL_DT_EN            BIT(0)

/* DTC counters are paired in 64-bit registers on a 16-byte stride. Yuck */
#define _CMN_DT_CNT_REG(n)              ((((n) / 2) * 4 + (n) % 2) * 4)
#define CMN_DT_PMEVCNT(n)               (CMN_PMU_OFFSET + _CMN_DT_CNT_REG(n))
#define CMN_DT_PMCCNTR                  (CMN_PMU_OFFSET + 0x40)

#define CMN_DT_PMEVCNTSR(n)             (CMN_PMU_OFFSET + 0x50 + _CMN_DT_CNT_REG(n))
#define CMN_DT_PMCCNTRSR                (CMN_PMU_OFFSET + 0x90)

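/*
 * To illustrate the layout from _CMN_DT_CNT_REG() above: counters 0-3 land
 * at offsets 0x00, 0x04, 0x10 and 0x14 respectively, i.e. each even/odd
 * pair shares one 64-bit register and successive pairs sit 16 bytes apart.
 */
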
#define CMN_DT_PMCR                     (CMN_PMU_OFFSET + 0x100)
#define CMN_DT_PMCR_PMU_EN              BIT(0)
#define CMN_DT_PMCR_CNTR_RST            BIT(5)
#define CMN_DT_PMCR_OVFL_INTR_EN        BIT(6)

#define CMN_DT_PMOVSR                   (CMN_PMU_OFFSET + 0x118)
#define CMN_DT_PMOVSR_CLR               (CMN_PMU_OFFSET + 0x120)

#define CMN_DT_PMSSR                    (CMN_PMU_OFFSET + 0x128)
#define CMN_DT_PMSSR_SS_STATUS(n)       BIT(n)

#define CMN_DT_PMSRR                    (CMN_PMU_OFFSET + 0x130)
#define CMN_DT_PMSRR_SS_REQ             BIT(0)

#define CMN_DT_NUM_COUNTERS             8
#define CMN_MAX_DTCS                    4

/*
 * Even in the worst case a DTC counter can't wrap in fewer than 2^42 cycles,
 * so throwing away one bit to make overflow handling easy is no big deal.
 */
#define CMN_COUNTER_INIT                0x80000000
/* Similarly for the 40-bit cycle counter */
#define CMN_CC_INIT                     0x8000000000ULL


/* Event attributes */
#define CMN_CONFIG_TYPE                 GENMASK_ULL(15, 0)
#define CMN_CONFIG_EVENTID              GENMASK_ULL(23, 16)
#define CMN_CONFIG_OCCUPID              GENMASK_ULL(27, 24)
#define CMN_CONFIG_BYNODEID             BIT_ULL(31)
#define CMN_CONFIG_NODEID               GENMASK_ULL(47, 32)

#define CMN_EVENT_TYPE(event)           FIELD_GET(CMN_CONFIG_TYPE, (event)->attr.config)
#define CMN_EVENT_EVENTID(event)        FIELD_GET(CMN_CONFIG_EVENTID, (event)->attr.config)
#define CMN_EVENT_OCCUPID(event)        FIELD_GET(CMN_CONFIG_OCCUPID, (event)->attr.config)
#define CMN_EVENT_BYNODEID(event)       FIELD_GET(CMN_CONFIG_BYNODEID, (event)->attr.config)
#define CMN_EVENT_NODEID(event)         FIELD_GET(CMN_CONFIG_NODEID, (event)->attr.config)

#define CMN_CONFIG_WP_COMBINE           GENMASK_ULL(27, 24)
#define CMN_CONFIG_WP_DEV_SEL           GENMASK_ULL(50, 48)
#define CMN_CONFIG_WP_CHN_SEL           GENMASK_ULL(55, 51)
/* Note that we don't yet support the tertiary match group on newer IPs */
#define CMN_CONFIG_WP_GRP               BIT_ULL(56)
#define CMN_CONFIG_WP_EXCLUSIVE         BIT_ULL(57)
#define CMN_CONFIG1_WP_VAL              GENMASK_ULL(63, 0)
#define CMN_CONFIG2_WP_MASK             GENMASK_ULL(63, 0)

#define CMN_EVENT_WP_COMBINE(event)     FIELD_GET(CMN_CONFIG_WP_COMBINE, (event)->attr.config)
#define CMN_EVENT_WP_DEV_SEL(event)     FIELD_GET(CMN_CONFIG_WP_DEV_SEL, (event)->attr.config)
#define CMN_EVENT_WP_CHN_SEL(event)     FIELD_GET(CMN_CONFIG_WP_CHN_SEL, (event)->attr.config)
#define CMN_EVENT_WP_GRP(event)         FIELD_GET(CMN_CONFIG_WP_GRP, (event)->attr.config)
#define CMN_EVENT_WP_EXCLUSIVE(event)   FIELD_GET(CMN_CONFIG_WP_EXCLUSIVE, (event)->attr.config)
#define CMN_EVENT_WP_VAL(event)         FIELD_GET(CMN_CONFIG1_WP_VAL, (event)->attr.config1)
#define CMN_EVENT_WP_MASK(event)        FIELD_GET(CMN_CONFIG2_WP_MASK, (event)->attr.config2)

/* Made-up event IDs for watchpoint direction */
#define CMN_WP_UP                       0
#define CMN_WP_DOWN                     2


enum cmn_model {
        CMN_ANY = -1,
        CMN600 = 1,
        CI700 = 2,
};

/* CMN-600 r0px shouldn't exist in silicon, thankfully */
enum cmn_revision {
        CMN600_R1P0,
        CMN600_R1P1,
        CMN600_R1P2,
        CMN600_R1P3,
        CMN600_R2P0,
        CMN600_R3P0,
        CMN600_R3P1,
        CI700_R0P0 = 0,
        CI700_R1P0,
        CI700_R2P0,
};

enum cmn_node_type {
        CMN_TYPE_INVALID,
        CMN_TYPE_DVM,
        CMN_TYPE_CFG,
        CMN_TYPE_DTC,
        CMN_TYPE_HNI,
        CMN_TYPE_HNF,
        CMN_TYPE_XP,
        CMN_TYPE_SBSX,
        CMN_TYPE_MPAM_S,
        CMN_TYPE_MPAM_NS,
        CMN_TYPE_RNI,
        CMN_TYPE_RND = 0xd,
        CMN_TYPE_RNSAM = 0xf,
        CMN_TYPE_MTSX,
        CMN_TYPE_CXRA = 0x100,
        CMN_TYPE_CXHA = 0x101,
        CMN_TYPE_CXLA = 0x102,
        /* Not a real node type */
        CMN_TYPE_WP = 0x7770
};

struct arm_cmn_node {
        void __iomem *pmu_base;
        u16 id, logid;
        enum cmn_node_type type;

        int dtm;
        union {
                /* DN/HN-F/CXHA */
                struct {
                        u8 occupid_val;
                        u8 occupid_count;
                };
                /* XP */
                u8 dtc;
        };
        union {
                u8 event[4];
                __le32 event_sel;
        };
};

struct arm_cmn_dtm {
        void __iomem *base;
        u32 pmu_config_low;
        union {
                u8 input_sel[4];
                __le32 pmu_config_high;
        };
        s8 wp_event[4];
};

struct arm_cmn_dtc {
        void __iomem *base;
        int irq;
        int irq_friend;
        bool cc_active;

        struct perf_event *counters[CMN_DT_NUM_COUNTERS];
        struct perf_event *cycles;
};

#define CMN_STATE_DISABLED      BIT(0)
#define CMN_STATE_TXN           BIT(1)

struct arm_cmn {
        struct device *dev;
        void __iomem *base;
        unsigned int state;

        enum cmn_revision rev;
        enum cmn_model model;
        u8 mesh_x;
        u8 mesh_y;
        u16 num_xps;
        u16 num_dns;
        bool multi_dtm;
        u8 ports_used;
        struct {
                unsigned int rsp_vc_num : 2;
                unsigned int dat_vc_num : 2;
        };

        struct arm_cmn_node *xps;
        struct arm_cmn_node *dns;

        struct arm_cmn_dtm *dtms;
        struct arm_cmn_dtc *dtc;
        unsigned int num_dtcs;

        int cpu;
        struct hlist_node cpuhp_node;

        struct pmu pmu;
        struct dentry *debug;
};

#define to_cmn(p)       container_of(p, struct arm_cmn, pmu)

static int arm_cmn_hp_state;

struct arm_cmn_nodeid {
        u8 x;
        u8 y;
        u8 port;
        u8 dev;
};

static int arm_cmn_xyidbits(const struct arm_cmn *cmn)
{
        int dim = max(cmn->mesh_x, cmn->mesh_y);

        return dim > 4 ? 3 : 2;
}

static struct arm_cmn_nodeid arm_cmn_nid(const struct arm_cmn *cmn, u16 id)
{
        struct arm_cmn_nodeid nid;

        if (cmn->num_xps == 1) {
                nid.x = 0;
                nid.y = 0;
                nid.port = CMN_NODEID_1x1_PID(id);
                nid.dev = CMN_NODEID_DEVID(id);
        } else {
                int bits = arm_cmn_xyidbits(cmn);

                nid.x = CMN_NODEID_X(id, bits);
                nid.y = CMN_NODEID_Y(id, bits);
                if (cmn->ports_used & 0xc) {
                        nid.port = CMN_NODEID_EXT_PID(id);
                        nid.dev = CMN_NODEID_EXT_DEVID(id);
                } else {
                        nid.port = CMN_NODEID_PID(id);
                        nid.dev = CMN_NODEID_DEVID(id);
                }
        }
        return nid;
}

static struct arm_cmn_node *arm_cmn_node_to_xp(const struct arm_cmn *cmn,
                                               const struct arm_cmn_node *dn)
{
        struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id);
        int xp_idx = cmn->mesh_x * nid.y + nid.x;

        return cmn->xps + xp_idx;
}

static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn,
                                         enum cmn_node_type type)
{
        struct arm_cmn_node *dn;

        for (dn = cmn->dns; dn->type; dn++)
                if (dn->type == type)
                        return dn;
        return NULL;
}

static struct dentry *arm_cmn_debugfs;

#ifdef CONFIG_DEBUG_FS
static const char *arm_cmn_device_type(u8 type)
{
        switch (type) {
                case 0x01: return "  RN-I  |";
                case 0x02: return "  RN-D  |";
                case 0x04: return " RN-F_B |";
                case 0x05: return "RN-F_B_E|";
                case 0x06: return " RN-F_A |";
                case 0x07: return "RN-F_A_E|";
                case 0x08: return "  HN-T  |";
                case 0x09: return "  HN-I  |";
                case 0x0a: return "  HN-D  |";
                case 0x0c: return "  SN-F  |";
                case 0x0d: return "  SBSX  |";
                case 0x0e: return "  HN-F  |";
                case 0x0f: return " SN-F_E |";
                case 0x10: return " SN-F_D |";
                case 0x11: return "  CXHA  |";
                case 0x12: return "  CXRA  |";
                case 0x13: return "  CXRH  |";
                case 0x14: return " RN-F_D |";
                case 0x15: return "RN-F_D_E|";
                case 0x16: return " RN-F_C |";
                case 0x17: return "RN-F_C_E|";
                case 0x1c: return "  MTSX  |";
                default:   return "        |";
        }
}

static void arm_cmn_show_logid(struct seq_file *s, int x, int y, int p, int d)
{
        struct arm_cmn *cmn = s->private;
        struct arm_cmn_node *dn;

        for (dn = cmn->dns; dn->type; dn++) {
                struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id);

                if (dn->type == CMN_TYPE_XP)
                        continue;
                /* Ignore the extra components that will overlap on some ports */
                if (dn->type < CMN_TYPE_HNI)
                        continue;

                if (nid.x != x || nid.y != y || nid.port != p || nid.dev != d)
                        continue;

                seq_printf(s, "   #%-2d  |", dn->logid);
                return;
        }
        seq_puts(s, "        |");
}

static int arm_cmn_map_show(struct seq_file *s, void *data)
{
        struct arm_cmn *cmn = s->private;
        int x, y, p, pmax = fls(cmn->ports_used);

        seq_puts(s, "     X");
        for (x = 0; x < cmn->mesh_x; x++)
                seq_printf(s, "    %d    ", x);
        seq_puts(s, "\nY P D+");
        y = cmn->mesh_y;
        while (y--) {
                int xp_base = cmn->mesh_x * y;
                u8 port[6][CMN_MAX_DIMENSION];

                for (x = 0; x < cmn->mesh_x; x++)
                        seq_puts(s, "--------+");

                seq_printf(s, "\n%d    |", y);
                for (x = 0; x < cmn->mesh_x; x++) {
                        struct arm_cmn_node *xp = cmn->xps + xp_base + x;
                        void __iomem *base = xp->pmu_base - CMN_PMU_OFFSET;

                        port[0][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P0);
                        port[1][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P1);
                        port[2][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P2);
                        port[3][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P3);
                        port[4][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P4);
                        port[5][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P5);
                        seq_printf(s, " XP #%-2d |", xp_base + x);
                }

                seq_puts(s, "\n     |");
                for (x = 0; x < cmn->mesh_x; x++) {
                        u8 dtc = cmn->xps[xp_base + x].dtc;

                        if (dtc & (dtc - 1))
                                seq_puts(s, " DTC ?? |");
                        else
                                seq_printf(s, " DTC %ld  |", __ffs(dtc));
                }
                seq_puts(s, "\n     |");
                for (x = 0; x < cmn->mesh_x; x++)
                        seq_puts(s, "........|");

                for (p = 0; p < pmax; p++) {
                        seq_printf(s, "\n  %d  |", p);
                        for (x = 0; x < cmn->mesh_x; x++)
                                seq_puts(s, arm_cmn_device_type(port[p][x]));
                        seq_puts(s, "\n    0|");
                        for (x = 0; x < cmn->mesh_x; x++)
                                arm_cmn_show_logid(s, x, y, p, 0);
                        seq_puts(s, "\n    1|");
                        for (x = 0; x < cmn->mesh_x; x++)
                                arm_cmn_show_logid(s, x, y, p, 1);
                }
                seq_puts(s, "\n-----+");
        }
        for (x = 0; x < cmn->mesh_x; x++)
                seq_puts(s, "--------+");
        seq_puts(s, "\n");
        return 0;
}
DEFINE_SHOW_ATTRIBUTE(arm_cmn_map);

static void arm_cmn_debugfs_init(struct arm_cmn *cmn, int id)
{
        const char *name = "map";

        if (id > 0)
                name = devm_kasprintf(cmn->dev, GFP_KERNEL, "map_%d", id);
        if (!name)
                return;

        cmn->debug = debugfs_create_file(name, 0444, arm_cmn_debugfs, cmn, &arm_cmn_map_fops);
}
#else
static void arm_cmn_debugfs_init(struct arm_cmn *cmn, int id) {}
#endif

struct arm_cmn_hw_event {
        struct arm_cmn_node *dn;
        u64 dtm_idx[2];
        unsigned int dtc_idx;
        u8 dtcs_used;
        u8 num_dns;
        u8 dtm_offset;
};

#define for_each_hw_dn(hw, dn, i) \
        for (i = 0, dn = hw->dn; i < hw->num_dns; i++, dn++)

static struct arm_cmn_hw_event *to_cmn_hw(struct perf_event *event)
{
        BUILD_BUG_ON(sizeof(struct arm_cmn_hw_event) > offsetof(struct hw_perf_event, target));
        return (struct arm_cmn_hw_event *)&event->hw;
}

static void arm_cmn_set_index(u64 x[], unsigned int pos, unsigned int val)
{
        x[pos / 32] |= (u64)val << ((pos % 32) * 2);
}

static unsigned int arm_cmn_get_index(u64 x[], unsigned int pos)
{
        return (x[pos / 32] >> ((pos % 32) * 2)) & 3;
}
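
/*
 * To sketch the packing performed above: each u64 holds 32 two-bit fields,
 * so the DTM counter index for an event's nth node occupies bits [2n+1:2n]
 * of dtm_idx[0] for n < 32, spilling into dtm_idx[1] beyond that. For
 * example, arm_cmn_set_index(idx, 5, 3) sets bits [11:10] of idx[0].
 */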

struct arm_cmn_event_attr {
        struct device_attribute attr;
        enum cmn_model model;
        enum cmn_node_type type;
        u8 eventid;
        u8 occupid;
};

struct arm_cmn_format_attr {
        struct device_attribute attr;
        u64 field;
        int config;
};

#define CMN_EVENT_ATTR(_model, _name, _type, _eventid, _occupid)        \
        (&((struct arm_cmn_event_attr[]) {{                             \
                .attr = __ATTR(_name, 0444, arm_cmn_event_show, NULL),  \
                .model = _model,                                        \
                .type = _type,                                          \
                .eventid = _eventid,                                    \
                .occupid = _occupid,                                    \
        }})[0].attr.attr)

static bool arm_cmn_is_occup_event(enum cmn_model model,
                                   enum cmn_node_type type, unsigned int id)
{
        if (type == CMN_TYPE_DVM)
                return (model == CMN600 && id == 0x05) ||
                       (model == CI700 && id == 0x0c);
        return type == CMN_TYPE_HNF && id == 0x0f;
}
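
/*
 * For example, hnf_qos_pocq_occupancy_read below is HN-F event 0x0f with
 * occupid 1, so it matches here and gains the extra "occupid" field in
 * its sysfs description.
 */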

static ssize_t arm_cmn_event_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        struct arm_cmn_event_attr *eattr;

        eattr = container_of(attr, typeof(*eattr), attr);

        if (eattr->type == CMN_TYPE_DTC)
                return sysfs_emit(buf, "type=0x%x\n", eattr->type);

        if (eattr->type == CMN_TYPE_WP)
                return sysfs_emit(buf,
                                  "type=0x%x,eventid=0x%x,wp_dev_sel=?,wp_chn_sel=?,wp_grp=?,wp_val=?,wp_mask=?\n",
                                  eattr->type, eattr->eventid);

        if (arm_cmn_is_occup_event(eattr->model, eattr->type, eattr->eventid))
                return sysfs_emit(buf, "type=0x%x,eventid=0x%x,occupid=0x%x\n",
                                  eattr->type, eattr->eventid, eattr->occupid);

        return sysfs_emit(buf, "type=0x%x,eventid=0x%x\n", eattr->type,
                          eattr->eventid);
}

static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
                                             struct attribute *attr,
                                             int unused)
{
        struct device *dev = kobj_to_dev(kobj);
        struct arm_cmn *cmn = to_cmn(dev_get_drvdata(dev));
        struct arm_cmn_event_attr *eattr;

        eattr = container_of(attr, typeof(*eattr), attr.attr);

        if (!(eattr->model & cmn->model))
                return 0;

        /* Watchpoints aren't nodes, so avoid confusion */
        if (eattr->type == CMN_TYPE_WP)
                return attr->mode;

        /* Hide XP events for unused interfaces/channels */
        if (eattr->type == CMN_TYPE_XP) {
                unsigned int intf = (eattr->eventid >> 2) & 7;
                unsigned int chan = eattr->eventid >> 5;

                if ((intf & 4) && !(cmn->ports_used & BIT(intf & 3)))
                        return 0;

                if (chan == 4 && cmn->model == CMN600)
                        return 0;

                if ((chan == 5 && cmn->rsp_vc_num < 2) ||
                    (chan == 6 && cmn->dat_vc_num < 2))
                        return 0;
        }

        /* Revision-specific differences */
        if (cmn->model == CMN600 && cmn->rev < CMN600_R1P2) {
                if (eattr->type == CMN_TYPE_HNF && eattr->eventid == 0x1b)
                        return 0;
        }

        if (!arm_cmn_node(cmn, eattr->type))
                return 0;

        return attr->mode;
}

#define _CMN_EVENT_DVM(_model, _name, _event, _occup)           \
        CMN_EVENT_ATTR(_model, dn_##_name, CMN_TYPE_DVM, _event, _occup)
#define CMN_EVENT_DTC(_name)                                    \
        CMN_EVENT_ATTR(CMN_ANY, dtc_##_name, CMN_TYPE_DTC, 0, 0)
#define _CMN_EVENT_HNF(_model, _name, _event, _occup)           \
        CMN_EVENT_ATTR(_model, hnf_##_name, CMN_TYPE_HNF, _event, _occup)
#define CMN_EVENT_HNI(_name, _event)                            \
        CMN_EVENT_ATTR(CMN_ANY, hni_##_name, CMN_TYPE_HNI, _event, 0)
#define __CMN_EVENT_XP(_name, _event)                           \
        CMN_EVENT_ATTR(CMN_ANY, mxp_##_name, CMN_TYPE_XP, _event, 0)
#define CMN_EVENT_SBSX(_model, _name, _event)                   \
        CMN_EVENT_ATTR(_model, sbsx_##_name, CMN_TYPE_SBSX, _event, 0)
#define CMN_EVENT_RNID(_model, _name, _event)                   \
        CMN_EVENT_ATTR(_model, rnid_##_name, CMN_TYPE_RNI, _event, 0)
#define CMN_EVENT_MTSX(_name, _event)                           \
        CMN_EVENT_ATTR(CMN_ANY, mtsx_##_name, CMN_TYPE_MTSX, _event, 0)

#define CMN_EVENT_DVM(_model, _name, _event)                    \
        _CMN_EVENT_DVM(_model, _name, _event, 0)
#define CMN_EVENT_HNF(_model, _name, _event)                    \
        _CMN_EVENT_HNF(_model, _name, _event, 0)
#define _CMN_EVENT_XP(_name, _event)                            \
        __CMN_EVENT_XP(e_##_name, (_event) | (0 << 2)),         \
        __CMN_EVENT_XP(w_##_name, (_event) | (1 << 2)),         \
        __CMN_EVENT_XP(n_##_name, (_event) | (2 << 2)),         \
        __CMN_EVENT_XP(s_##_name, (_event) | (3 << 2)),         \
        __CMN_EVENT_XP(p0_##_name, (_event) | (4 << 2)),        \
        __CMN_EVENT_XP(p1_##_name, (_event) | (5 << 2)),        \
        __CMN_EVENT_XP(p2_##_name, (_event) | (6 << 2)),        \
        __CMN_EVENT_XP(p3_##_name, (_event) | (7 << 2))

/* Good thing there are only 3 fundamental XP events... */
#define CMN_EVENT_XP(_name, _event)                             \
        _CMN_EVENT_XP(req_##_name, (_event) | (0 << 5)),        \
        _CMN_EVENT_XP(rsp_##_name, (_event) | (1 << 5)),        \
        _CMN_EVENT_XP(snp_##_name, (_event) | (2 << 5)),        \
        _CMN_EVENT_XP(dat_##_name, (_event) | (3 << 5)),        \
        _CMN_EVENT_XP(pub_##_name, (_event) | (4 << 5)),        \
        _CMN_EVENT_XP(rsp2_##_name, (_event) | (5 << 5)),       \
        _CMN_EVENT_XP(dat2_##_name, (_event) | (6 << 5))

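/*
 * Thus a complete XP event ID is assembled as chn[7:5]:intf[4:2]:event[1:0],
 * which is exactly how arm_cmn_event_attr_is_visible() above picks it apart.
 * For example, mxp_n_dat_txflit_valid ends up with event ID
 * 0x01 | (2 << 2) | (3 << 5) = 0x69.
 */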

static struct attribute *arm_cmn_event_attrs[] = {
        CMN_EVENT_DTC(cycles),

        /*
         * DVM node events conflict with HN-I events in the equivalent PMU
         * slot, but our lazy short-cut of using the DTM counter index for
         * the PMU index as well happens to avoid that by construction.
         */
        CMN_EVENT_DVM(CMN600, rxreq_dvmop,              0x01),
        CMN_EVENT_DVM(CMN600, rxreq_dvmsync,            0x02),
        CMN_EVENT_DVM(CMN600, rxreq_dvmop_vmid_filtered, 0x03),
        CMN_EVENT_DVM(CMN600, rxreq_retried,            0x04),
        _CMN_EVENT_DVM(CMN600, rxreq_trk_occupancy_all, 0x05, 0),
        _CMN_EVENT_DVM(CMN600, rxreq_trk_occupancy_dvmop, 0x05, 1),
        _CMN_EVENT_DVM(CMN600, rxreq_trk_occupancy_dvmsync, 0x05, 2),
        CMN_EVENT_DVM(CI700, dvmop_tlbi,                0x01),
        CMN_EVENT_DVM(CI700, dvmop_bpi,                 0x02),
        CMN_EVENT_DVM(CI700, dvmop_pici,                0x03),
        CMN_EVENT_DVM(CI700, dvmop_vici,                0x04),
        CMN_EVENT_DVM(CI700, dvmsync,                   0x05),
        CMN_EVENT_DVM(CI700, vmid_filtered,             0x06),
        CMN_EVENT_DVM(CI700, rndop_filtered,            0x07),
        CMN_EVENT_DVM(CI700, retry,                     0x08),
        CMN_EVENT_DVM(CI700, txsnp_flitv,               0x09),
        CMN_EVENT_DVM(CI700, txsnp_stall,               0x0a),
        CMN_EVENT_DVM(CI700, trkfull,                   0x0b),
        _CMN_EVENT_DVM(CI700, trk_occupancy_all,        0x0c, 0),
        _CMN_EVENT_DVM(CI700, trk_occupancy_dvmop,      0x0c, 1),
        _CMN_EVENT_DVM(CI700, trk_occupancy_dvmsync,    0x0c, 2),

        CMN_EVENT_HNF(CMN_ANY, cache_miss,              0x01),
        CMN_EVENT_HNF(CMN_ANY, slc_sf_cache_access,     0x02),
        CMN_EVENT_HNF(CMN_ANY, cache_fill,              0x03),
        CMN_EVENT_HNF(CMN_ANY, pocq_retry,              0x04),
        CMN_EVENT_HNF(CMN_ANY, pocq_reqs_recvd,         0x05),
        CMN_EVENT_HNF(CMN_ANY, sf_hit,                  0x06),
        CMN_EVENT_HNF(CMN_ANY, sf_evictions,            0x07),
        CMN_EVENT_HNF(CMN_ANY, dir_snoops_sent,         0x08),
        CMN_EVENT_HNF(CMN_ANY, brd_snoops_sent,         0x09),
        CMN_EVENT_HNF(CMN_ANY, slc_eviction,            0x0a),
        CMN_EVENT_HNF(CMN_ANY, slc_fill_invalid_way,    0x0b),
        CMN_EVENT_HNF(CMN_ANY, mc_retries,              0x0c),
        CMN_EVENT_HNF(CMN_ANY, mc_reqs,                 0x0d),
        CMN_EVENT_HNF(CMN_ANY, qos_hh_retry,            0x0e),
        _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_all, 0x0f, 0),
        _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_read, 0x0f, 1),
        _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_write, 0x0f, 2),
        _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_atomic, 0x0f, 3),
        _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_stash, 0x0f, 4),
        CMN_EVENT_HNF(CMN_ANY, pocq_addrhaz,            0x10),
        CMN_EVENT_HNF(CMN_ANY, pocq_atomic_addrhaz,     0x11),
        CMN_EVENT_HNF(CMN_ANY, ld_st_swp_adq_full,      0x12),
        CMN_EVENT_HNF(CMN_ANY, cmp_adq_full,            0x13),
        CMN_EVENT_HNF(CMN_ANY, txdat_stall,             0x14),
        CMN_EVENT_HNF(CMN_ANY, txrsp_stall,             0x15),
        CMN_EVENT_HNF(CMN_ANY, seq_full,                0x16),
        CMN_EVENT_HNF(CMN_ANY, seq_hit,                 0x17),
        CMN_EVENT_HNF(CMN_ANY, snp_sent,                0x18),
        CMN_EVENT_HNF(CMN_ANY, sfbi_dir_snp_sent,       0x19),
        CMN_EVENT_HNF(CMN_ANY, sfbi_brd_snp_sent,       0x1a),
        CMN_EVENT_HNF(CMN_ANY, snp_sent_untrk,          0x1b),
        CMN_EVENT_HNF(CMN_ANY, intv_dirty,              0x1c),
        CMN_EVENT_HNF(CMN_ANY, stash_snp_sent,          0x1d),
        CMN_EVENT_HNF(CMN_ANY, stash_data_pull,         0x1e),
        CMN_EVENT_HNF(CMN_ANY, snp_fwded,               0x1f),
        CMN_EVENT_HNF(CI700, atomic_fwd,                0x20),
        CMN_EVENT_HNF(CI700, mpam_hardlim,              0x21),
        CMN_EVENT_HNF(CI700, mpam_softlim,              0x22),

        CMN_EVENT_HNI(rrt_rd_occ_cnt_ovfl,              0x20),
        CMN_EVENT_HNI(rrt_wr_occ_cnt_ovfl,              0x21),
        CMN_EVENT_HNI(rdt_rd_occ_cnt_ovfl,              0x22),
        CMN_EVENT_HNI(rdt_wr_occ_cnt_ovfl,              0x23),
        CMN_EVENT_HNI(wdb_occ_cnt_ovfl,                 0x24),
        CMN_EVENT_HNI(rrt_rd_alloc,                     0x25),
        CMN_EVENT_HNI(rrt_wr_alloc,                     0x26),
        CMN_EVENT_HNI(rdt_rd_alloc,                     0x27),
        CMN_EVENT_HNI(rdt_wr_alloc,                     0x28),
        CMN_EVENT_HNI(wdb_alloc,                        0x29),
        CMN_EVENT_HNI(txrsp_retryack,                   0x2a),
        CMN_EVENT_HNI(arvalid_no_arready,               0x2b),
        CMN_EVENT_HNI(arready_no_arvalid,               0x2c),
        CMN_EVENT_HNI(awvalid_no_awready,               0x2d),
        CMN_EVENT_HNI(awready_no_awvalid,               0x2e),
        CMN_EVENT_HNI(wvalid_no_wready,                 0x2f),
        CMN_EVENT_HNI(txdat_stall,                      0x30),
        CMN_EVENT_HNI(nonpcie_serialization,            0x31),
        CMN_EVENT_HNI(pcie_serialization,               0x32),

        CMN_EVENT_XP(txflit_valid,                      0x01),
        CMN_EVENT_XP(txflit_stall,                      0x02),
        CMN_EVENT_XP(partial_dat_flit,                  0x03),
        /* We treat watchpoints as a special made-up class of XP events */
        CMN_EVENT_ATTR(CMN_ANY, watchpoint_up, CMN_TYPE_WP, CMN_WP_UP, 0),
        CMN_EVENT_ATTR(CMN_ANY, watchpoint_down, CMN_TYPE_WP, CMN_WP_DOWN, 0),

        CMN_EVENT_SBSX(CMN_ANY, rd_req,                 0x01),
        CMN_EVENT_SBSX(CMN_ANY, wr_req,                 0x02),
        CMN_EVENT_SBSX(CMN_ANY, cmo_req,                0x03),
        CMN_EVENT_SBSX(CMN_ANY, txrsp_retryack,         0x04),
        CMN_EVENT_SBSX(CMN_ANY, txdat_flitv,            0x05),
        CMN_EVENT_SBSX(CMN_ANY, txrsp_flitv,            0x06),
        CMN_EVENT_SBSX(CMN_ANY, rd_req_trkr_occ_cnt_ovfl, 0x11),
        CMN_EVENT_SBSX(CMN_ANY, wr_req_trkr_occ_cnt_ovfl, 0x12),
        CMN_EVENT_SBSX(CMN_ANY, cmo_req_trkr_occ_cnt_ovfl, 0x13),
        CMN_EVENT_SBSX(CMN_ANY, wdb_occ_cnt_ovfl,       0x14),
        CMN_EVENT_SBSX(CMN_ANY, rd_axi_trkr_occ_cnt_ovfl, 0x15),
        CMN_EVENT_SBSX(CMN_ANY, cmo_axi_trkr_occ_cnt_ovfl, 0x16),
        CMN_EVENT_SBSX(CI700, rdb_occ_cnt_ovfl,         0x17),
        CMN_EVENT_SBSX(CMN_ANY, arvalid_no_arready,     0x21),
        CMN_EVENT_SBSX(CMN_ANY, awvalid_no_awready,     0x22),
        CMN_EVENT_SBSX(CMN_ANY, wvalid_no_wready,       0x23),
        CMN_EVENT_SBSX(CMN_ANY, txdat_stall,            0x24),
        CMN_EVENT_SBSX(CMN_ANY, txrsp_stall,            0x25),

        CMN_EVENT_RNID(CMN_ANY, s0_rdata_beats,         0x01),
        CMN_EVENT_RNID(CMN_ANY, s1_rdata_beats,         0x02),
        CMN_EVENT_RNID(CMN_ANY, s2_rdata_beats,         0x03),
        CMN_EVENT_RNID(CMN_ANY, rxdat_flits,            0x04),
        CMN_EVENT_RNID(CMN_ANY, txdat_flits,            0x05),
        CMN_EVENT_RNID(CMN_ANY, txreq_flits_total,      0x06),
        CMN_EVENT_RNID(CMN_ANY, txreq_flits_retried,    0x07),
        CMN_EVENT_RNID(CMN_ANY, rrt_occ_ovfl,           0x08),
        CMN_EVENT_RNID(CMN_ANY, wrt_occ_ovfl,           0x09),
        CMN_EVENT_RNID(CMN_ANY, txreq_flits_replayed,   0x0a),
        CMN_EVENT_RNID(CMN_ANY, wrcancel_sent,          0x0b),
        CMN_EVENT_RNID(CMN_ANY, s0_wdata_beats,         0x0c),
        CMN_EVENT_RNID(CMN_ANY, s1_wdata_beats,         0x0d),
        CMN_EVENT_RNID(CMN_ANY, s2_wdata_beats,         0x0e),
        CMN_EVENT_RNID(CMN_ANY, rrt_alloc,              0x0f),
        CMN_EVENT_RNID(CMN_ANY, wrt_alloc,              0x10),
        CMN_EVENT_RNID(CMN600, rdb_unord,               0x11),
        CMN_EVENT_RNID(CMN600, rdb_replay,              0x12),
        CMN_EVENT_RNID(CMN600, rdb_hybrid,              0x13),
        CMN_EVENT_RNID(CMN600, rdb_ord,                 0x14),
        CMN_EVENT_RNID(CI700, padb_occ_ovfl,            0x11),
        CMN_EVENT_RNID(CI700, rpdb_occ_ovfl,            0x12),
        CMN_EVENT_RNID(CI700, rrt_occup_ovfl_slice1,    0x13),
        CMN_EVENT_RNID(CI700, rrt_occup_ovfl_slice2,    0x14),
        CMN_EVENT_RNID(CI700, rrt_occup_ovfl_slice3,    0x15),
        CMN_EVENT_RNID(CI700, wrt_throttled,            0x16),

        CMN_EVENT_MTSX(tc_lookup,                       0x01),
        CMN_EVENT_MTSX(tc_fill,                         0x02),
        CMN_EVENT_MTSX(tc_miss,                         0x03),
        CMN_EVENT_MTSX(tdb_forward,                     0x04),
        CMN_EVENT_MTSX(tcq_hazard,                      0x05),
        CMN_EVENT_MTSX(tcq_rd_alloc,                    0x06),
        CMN_EVENT_MTSX(tcq_wr_alloc,                    0x07),
        CMN_EVENT_MTSX(tcq_cmo_alloc,                   0x08),
        CMN_EVENT_MTSX(axi_rd_req,                      0x09),
        CMN_EVENT_MTSX(axi_wr_req,                      0x0a),
        CMN_EVENT_MTSX(tcq_occ_cnt_ovfl,                0x0b),
        CMN_EVENT_MTSX(tdb_occ_cnt_ovfl,                0x0c),

        NULL
};

static const struct attribute_group arm_cmn_event_attrs_group = {
        .name = "events",
        .attrs = arm_cmn_event_attrs,
        .is_visible = arm_cmn_event_attr_is_visible,
};

static ssize_t arm_cmn_format_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct arm_cmn_format_attr *fmt = container_of(attr, typeof(*fmt), attr);
        int lo = __ffs(fmt->field), hi = __fls(fmt->field);

        if (lo == hi)
                return sysfs_emit(buf, "config:%d\n", lo);

        if (!fmt->config)
                return sysfs_emit(buf, "config:%d-%d\n", lo, hi);

        return sysfs_emit(buf, "config%d:%d-%d\n", fmt->config, lo, hi);
}

#define _CMN_FORMAT_ATTR(_name, _cfg, _fld)                             \
        (&((struct arm_cmn_format_attr[]) {{                            \
                .attr = __ATTR(_name, 0444, arm_cmn_format_show, NULL), \
                .config = _cfg,                                         \
                .field = _fld,                                          \
        }})[0].attr.attr)
#define CMN_FORMAT_ATTR(_name, _fld)    _CMN_FORMAT_ATTR(_name, 0, _fld)

static struct attribute *arm_cmn_format_attrs[] = {
        CMN_FORMAT_ATTR(type, CMN_CONFIG_TYPE),
        CMN_FORMAT_ATTR(eventid, CMN_CONFIG_EVENTID),
        CMN_FORMAT_ATTR(occupid, CMN_CONFIG_OCCUPID),
        CMN_FORMAT_ATTR(bynodeid, CMN_CONFIG_BYNODEID),
        CMN_FORMAT_ATTR(nodeid, CMN_CONFIG_NODEID),

        CMN_FORMAT_ATTR(wp_dev_sel, CMN_CONFIG_WP_DEV_SEL),
        CMN_FORMAT_ATTR(wp_chn_sel, CMN_CONFIG_WP_CHN_SEL),
        CMN_FORMAT_ATTR(wp_grp, CMN_CONFIG_WP_GRP),
        CMN_FORMAT_ATTR(wp_exclusive, CMN_CONFIG_WP_EXCLUSIVE),
        CMN_FORMAT_ATTR(wp_combine, CMN_CONFIG_WP_COMBINE),

        _CMN_FORMAT_ATTR(wp_val, 1, CMN_CONFIG1_WP_VAL),
        _CMN_FORMAT_ATTR(wp_mask, 2, CMN_CONFIG2_WP_MASK),

        NULL
};

static const struct attribute_group arm_cmn_format_attrs_group = {
        .name = "format",
        .attrs = arm_cmn_format_attrs,
};

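/*
 * For instance, given these fields, counting cache misses on one particular
 * HN-F might look like the following from userspace (the instance suffix
 * and node ID here are illustrative):
 *
 *   perf stat -e arm_cmn_0/type=0x5,eventid=0x1,bynodeid=1,nodeid=0x68/
 */
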
static ssize_t arm_cmn_cpumask_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct arm_cmn *cmn = to_cmn(dev_get_drvdata(dev));

        return cpumap_print_to_pagebuf(true, buf, cpumask_of(cmn->cpu));
}

static struct device_attribute arm_cmn_cpumask_attr =
                __ATTR(cpumask, 0444, arm_cmn_cpumask_show, NULL);

static struct attribute *arm_cmn_cpumask_attrs[] = {
        &arm_cmn_cpumask_attr.attr,
        NULL,
};

static const struct attribute_group arm_cmn_cpumask_attr_group = {
        .attrs = arm_cmn_cpumask_attrs,
};

static const struct attribute_group *arm_cmn_attr_groups[] = {
        &arm_cmn_event_attrs_group,
        &arm_cmn_format_attrs_group,
        &arm_cmn_cpumask_attr_group,
        NULL
};

static int arm_cmn_wp_idx(struct perf_event *event)
{
        return CMN_EVENT_EVENTID(event) + CMN_EVENT_WP_GRP(event);
}
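
/*
 * That is, with CMN_WP_UP = 0 and CMN_WP_DOWN = 2, watchpoints 0/1 cover
 * the "up" direction for match groups 0/1, and watchpoints 2/3 likewise
 * cover "down".
 */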

static u32 arm_cmn_wp_config(struct perf_event *event)
{
        u32 config;
        u32 dev = CMN_EVENT_WP_DEV_SEL(event);
        u32 chn = CMN_EVENT_WP_CHN_SEL(event);
        u32 grp = CMN_EVENT_WP_GRP(event);
        u32 exc = CMN_EVENT_WP_EXCLUSIVE(event);
        u32 combine = CMN_EVENT_WP_COMBINE(event);
        bool is_cmn600 = to_cmn(event->pmu)->model == CMN600;

        config = FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL, dev) |
                 FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_CHN_SEL, chn) |
                 FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_GRP, grp) |
                 FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL2, dev >> 1);
        if (exc)
                config |= is_cmn600 ? CMN600_WPn_CONFIG_WP_EXCLUSIVE :
                                      CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE;
        if (combine && !grp)
                config |= is_cmn600 ? CMN600_WPn_CONFIG_WP_COMBINE :
                                      CMN_DTM_WPn_CONFIG_WP_COMBINE;
        return config;
}

static void arm_cmn_set_state(struct arm_cmn *cmn, u32 state)
{
        if (!cmn->state)
                writel_relaxed(0, cmn->dtc[0].base + CMN_DT_PMCR);
        cmn->state |= state;
}

static void arm_cmn_clear_state(struct arm_cmn *cmn, u32 state)
{
        cmn->state &= ~state;
        if (!cmn->state)
                writel_relaxed(CMN_DT_PMCR_PMU_EN | CMN_DT_PMCR_OVFL_INTR_EN,
                               cmn->dtc[0].base + CMN_DT_PMCR);
}

static void arm_cmn_pmu_enable(struct pmu *pmu)
{
        arm_cmn_clear_state(to_cmn(pmu), CMN_STATE_DISABLED);
}

static void arm_cmn_pmu_disable(struct pmu *pmu)
{
        arm_cmn_set_state(to_cmn(pmu), CMN_STATE_DISABLED);
}

static u64 arm_cmn_read_dtm(struct arm_cmn *cmn, struct arm_cmn_hw_event *hw,
                            bool snapshot)
{
        struct arm_cmn_dtm *dtm = NULL;
        struct arm_cmn_node *dn;
        unsigned int i, offset, dtm_idx;
        u64 reg, count = 0;

        offset = snapshot ? CMN_DTM_PMEVCNTSR : CMN_DTM_PMEVCNT;
        for_each_hw_dn(hw, dn, i) {
                if (dtm != &cmn->dtms[dn->dtm]) {
                        dtm = &cmn->dtms[dn->dtm] + hw->dtm_offset;
                        reg = readq_relaxed(dtm->base + offset);
                }
                dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);
                count += (u16)(reg >> (dtm_idx * 16));
        }
        return count;
}
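
/*
 * Note each DTM packs its four 16-bit counters into one 64-bit register,
 * so the register is re-read only on crossing into a new DTM and the
 * relevant lane is picked out by dtm_idx.
 */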

static u64 arm_cmn_read_cc(struct arm_cmn_dtc *dtc)
{
        u64 val = readq_relaxed(dtc->base + CMN_DT_PMCCNTR);

        writeq_relaxed(CMN_CC_INIT, dtc->base + CMN_DT_PMCCNTR);
        return (val - CMN_CC_INIT) & ((CMN_CC_INIT << 1) - 1);
}
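
/*
 * The masking above is plain 40-bit modular arithmetic: CMN_CC_INIT is
 * bit 39, so ((CMN_CC_INIT << 1) - 1) is a 40-bit mask and the returned
 * delta stays correct even if the counter passed zero since the last read.
 */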

static u32 arm_cmn_read_counter(struct arm_cmn_dtc *dtc, int idx)
{
        u32 val, pmevcnt = CMN_DT_PMEVCNT(idx);

        val = readl_relaxed(dtc->base + pmevcnt);
        writel_relaxed(CMN_COUNTER_INIT, dtc->base + pmevcnt);
        return val - CMN_COUNTER_INIT;
}

static void arm_cmn_init_counter(struct perf_event *event)
{
        struct arm_cmn *cmn = to_cmn(event->pmu);
        struct arm_cmn_hw_event *hw = to_cmn_hw(event);
        unsigned int i, pmevcnt = CMN_DT_PMEVCNT(hw->dtc_idx);
        u64 count;

        for (i = 0; hw->dtcs_used & (1U << i); i++) {
                writel_relaxed(CMN_COUNTER_INIT, cmn->dtc[i].base + pmevcnt);
                cmn->dtc[i].counters[hw->dtc_idx] = event;
        }

        count = arm_cmn_read_dtm(cmn, hw, false);
        local64_set(&event->hw.prev_count, count);
}

static void arm_cmn_event_read(struct perf_event *event)
{
        struct arm_cmn *cmn = to_cmn(event->pmu);
        struct arm_cmn_hw_event *hw = to_cmn_hw(event);
        u64 delta, new, prev;
        unsigned long flags;
        unsigned int i;

        if (hw->dtc_idx == CMN_DT_NUM_COUNTERS) {
                i = __ffs(hw->dtcs_used);
                delta = arm_cmn_read_cc(cmn->dtc + i);
                local64_add(delta, &event->count);
                return;
        }
        new = arm_cmn_read_dtm(cmn, hw, false);
        prev = local64_xchg(&event->hw.prev_count, new);

        delta = new - prev;

        local_irq_save(flags);
        for (i = 0; hw->dtcs_used & (1U << i); i++) {
                new = arm_cmn_read_counter(cmn->dtc + i, hw->dtc_idx);
                delta += new << 16;
        }
        local_irq_restore(flags);
        local64_add(delta, &event->count);
}
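
/*
 * Putting the two halves together: a DTM counter supplies the low 16 bits
 * and its paired DTC counter advances once per DTM wrap, so the total
 * delta accumulated above is (dtm_new - dtm_prev) plus each DTC delta
 * shifted left by 16.
 */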
1034
1035 static void arm_cmn_event_start(struct perf_event *event, int flags)
1036 {
1037         struct arm_cmn *cmn = to_cmn(event->pmu);
1038         struct arm_cmn_hw_event *hw = to_cmn_hw(event);
1039         struct arm_cmn_node *dn;
1040         enum cmn_node_type type = CMN_EVENT_TYPE(event);
1041         int i;
1042
1043         if (type == CMN_TYPE_DTC) {
1044                 i = __ffs(hw->dtcs_used);
1045                 writeq_relaxed(CMN_CC_INIT, cmn->dtc[i].base + CMN_DT_PMCCNTR);
1046                 cmn->dtc[i].cc_active = true;
1047         } else if (type == CMN_TYPE_WP) {
1048                 int wp_idx = arm_cmn_wp_idx(event);
1049                 u64 val = CMN_EVENT_WP_VAL(event);
1050                 u64 mask = CMN_EVENT_WP_MASK(event);
1051
1052                 for_each_hw_dn(hw, dn, i) {
1053                         void __iomem *base = dn->pmu_base + CMN_DTM_OFFSET(hw->dtm_offset);
1054
1055                         writeq_relaxed(val, base + CMN_DTM_WPn_VAL(wp_idx));
1056                         writeq_relaxed(mask, base + CMN_DTM_WPn_MASK(wp_idx));
1057                 }
1058         } else for_each_hw_dn(hw, dn, i) {
1059                 int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);
1060
1061                 dn->event[dtm_idx] = CMN_EVENT_EVENTID(event);
1062                 writel_relaxed(le32_to_cpu(dn->event_sel), dn->pmu_base + CMN_PMU_EVENT_SEL);
1063         }
1064 }
1065
1066 static void arm_cmn_event_stop(struct perf_event *event, int flags)
1067 {
1068         struct arm_cmn *cmn = to_cmn(event->pmu);
1069         struct arm_cmn_hw_event *hw = to_cmn_hw(event);
1070         struct arm_cmn_node *dn;
1071         enum cmn_node_type type = CMN_EVENT_TYPE(event);
1072         int i;
1073
1074         if (type == CMN_TYPE_DTC) {
1075                 i = __ffs(hw->dtcs_used);
1076                 cmn->dtc[i].cc_active = false;
1077         } else if (type == CMN_TYPE_WP) {
1078                 int wp_idx = arm_cmn_wp_idx(event);
1079
1080                 for_each_hw_dn(hw, dn, i) {
1081                         void __iomem *base = dn->pmu_base + CMN_DTM_OFFSET(hw->dtm_offset);
1082
1083                         writeq_relaxed(0, base + CMN_DTM_WPn_MASK(wp_idx));
1084                         writeq_relaxed(~0ULL, base + CMN_DTM_WPn_VAL(wp_idx));
1085                 }
1086         } else for_each_hw_dn(hw, dn, i) {
1087                 int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);
1088
1089                 dn->event[dtm_idx] = 0;
1090                 writel_relaxed(le32_to_cpu(dn->event_sel), dn->pmu_base + CMN_PMU_EVENT_SEL);
1091         }
1092
1093         arm_cmn_event_read(event);
1094 }
1095
1096 struct arm_cmn_val {
1097         u8 dtm_count[CMN_MAX_DTMS];
1098         u8 occupid[CMN_MAX_DTMS];
1099         u8 wp[CMN_MAX_DTMS][4];
1100         int dtc_count;
1101         bool cycles;
1102 };
1103
1104 static void arm_cmn_val_add_event(struct arm_cmn *cmn, struct arm_cmn_val *val,
1105                                   struct perf_event *event)
1106 {
1107         struct arm_cmn_hw_event *hw = to_cmn_hw(event);
1108         struct arm_cmn_node *dn;
1109         enum cmn_node_type type;
1110         int i;
1111         u8 occupid;
1112
1113         if (is_software_event(event))
1114                 return;
1115
1116         type = CMN_EVENT_TYPE(event);
1117         if (type == CMN_TYPE_DTC) {
1118                 val->cycles = true;
1119                 return;
1120         }
1121
1122         val->dtc_count++;
1123         if (arm_cmn_is_occup_event(cmn->model, type, CMN_EVENT_EVENTID(event)))
1124                 occupid = CMN_EVENT_OCCUPID(event) + 1;
1125         else
1126                 occupid = 0;
1127
1128         for_each_hw_dn(hw, dn, i) {
1129                 int wp_idx, dtm = dn->dtm;
1130
1131                 val->dtm_count[dtm]++;
1132                 val->occupid[dtm] = occupid;
1133
1134                 if (type != CMN_TYPE_WP)
1135                         continue;
1136
1137                 wp_idx = arm_cmn_wp_idx(event);
1138                 val->wp[dtm][wp_idx] = CMN_EVENT_WP_COMBINE(event) + 1;
1139         }
1140 }
1141
1142 static int arm_cmn_validate_group(struct arm_cmn *cmn, struct perf_event *event)
1143 {
1144         struct arm_cmn_hw_event *hw = to_cmn_hw(event);
1145         struct arm_cmn_node *dn;
1146         struct perf_event *sibling, *leader = event->group_leader;
1147         enum cmn_node_type type;
1148         struct arm_cmn_val *val;
1149         int i, ret = -EINVAL;
1150         u8 occupid;
1151
1152         if (leader == event)
1153                 return 0;
1154
1155         if (event->pmu != leader->pmu && !is_software_event(leader))
1156                 return -EINVAL;
1157
1158         val = kzalloc(sizeof(*val), GFP_KERNEL);
1159         if (!val)
1160                 return -ENOMEM;
1161
1162         arm_cmn_val_add_event(cmn, val, leader);
1163         for_each_sibling_event(sibling, leader)
1164                 arm_cmn_val_add_event(cmn, val, sibling);
1165
1166         type = CMN_EVENT_TYPE(event);
1167         if (type == CMN_TYPE_DTC) {
1168                 ret = val->cycles ? -EINVAL : 0;
1169                 goto done;
1170         }
1171
1172         if (val->dtc_count == CMN_DT_NUM_COUNTERS)
1173                 goto done;
1174
1175         if (arm_cmn_is_occup_event(cmn->model, type, CMN_EVENT_EVENTID(event)))
1176                 occupid = CMN_EVENT_OCCUPID(event) + 1;
1177         else
1178                 occupid = 0;
1179
1180         for_each_hw_dn(hw, dn, i) {
1181                 int wp_idx, wp_cmb, dtm = dn->dtm;
1182
1183                 if (val->dtm_count[dtm] == CMN_DTM_NUM_COUNTERS)
1184                         goto done;
1185
1186                 if (occupid && val->occupid[dtm] && occupid != val->occupid[dtm])
1187                         goto done;
1188
1189                 if (type != CMN_TYPE_WP)
1190                         continue;
1191
1192                 wp_idx = arm_cmn_wp_idx(event);
1193                 if (val->wp[dtm][wp_idx])
1194                         goto done;
1195
1196                 wp_cmb = val->wp[dtm][wp_idx ^ 1];
1197                 if (wp_cmb && wp_cmb != CMN_EVENT_WP_COMBINE(event) + 1)
1198                         goto done;
1199         }
1200
1201         ret = 0;
1202 done:
1203         kfree(val);
1204         return ret;
1205 }
1206
1207 static int arm_cmn_event_init(struct perf_event *event)
1208 {
1209         struct arm_cmn *cmn = to_cmn(event->pmu);
1210         struct arm_cmn_hw_event *hw = to_cmn_hw(event);
1211         struct arm_cmn_node *dn;
1212         enum cmn_node_type type;
1213         bool bynodeid;
1214         u16 nodeid, eventid;
1215
1216         if (event->attr.type != event->pmu->type)
1217                 return -ENOENT;
1218
1219         if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
1220                 return -EINVAL;
1221
1222         event->cpu = cmn->cpu;
1223         if (event->cpu < 0)
1224                 return -EINVAL;
1225
1226         type = CMN_EVENT_TYPE(event);
1227         /* DTC events (i.e. cycles) already have everything they need */
1228         if (type == CMN_TYPE_DTC)
1229                 return 0;
1230
1231         /* For watchpoints we need the actual XP node here */
1232         if (type == CMN_TYPE_WP) {
1233                 type = CMN_TYPE_XP;
1234                 /* ...and we need a "real" direction */
1235                 eventid = CMN_EVENT_EVENTID(event);
1236                 if (eventid != CMN_WP_UP && eventid != CMN_WP_DOWN)
1237                         return -EINVAL;
1238                 /* ...but the DTM may depend on which port we're watching */
1239                 if (cmn->multi_dtm)
1240                         hw->dtm_offset = CMN_EVENT_WP_DEV_SEL(event) / 2;
1241         }
1242
1243         bynodeid = CMN_EVENT_BYNODEID(event);
1244         nodeid = CMN_EVENT_NODEID(event);
1245
1246         hw->dn = arm_cmn_node(cmn, type);
1247         if (!hw->dn)
1248                 return -EINVAL;
1249         for (dn = hw->dn; dn->type == type; dn++) {
1250                 if (bynodeid && dn->id != nodeid) {
1251                         hw->dn++;
1252                         continue;
1253                 }
1254                 hw->dtcs_used |= arm_cmn_node_to_xp(cmn, dn)->dtc;
1255                 hw->num_dns++;
1256                 if (bynodeid)
1257                         break;
1258         }
1259
1260         if (!hw->num_dns) {
1261                 struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, nodeid);
1262
1263                 dev_dbg(cmn->dev, "invalid node 0x%x (%d,%d,%d,%d) type 0x%x\n",
1264                         nodeid, nid.x, nid.y, nid.port, nid.dev, type);
1265                 return -EINVAL;
1266         }
1267
1268         return arm_cmn_validate_group(cmn, event);
1269 }
1270
1271 static void arm_cmn_event_clear(struct arm_cmn *cmn, struct perf_event *event,
1272                                 int i)
1273 {
1274         struct arm_cmn_hw_event *hw = to_cmn_hw(event);
1275         enum cmn_node_type type = CMN_EVENT_TYPE(event);
1276
1277         while (i--) {
1278                 struct arm_cmn_dtm *dtm = &cmn->dtms[hw->dn[i].dtm] + hw->dtm_offset;
1279                 unsigned int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);
1280
1281                 if (type == CMN_TYPE_WP)
1282                         dtm->wp_event[arm_cmn_wp_idx(event)] = -1;
1283
1284                 if (arm_cmn_is_occup_event(cmn->model, type, CMN_EVENT_EVENTID(event)))
1285                         hw->dn[i].occupid_count--;
1286
1287                 dtm->pmu_config_low &= ~CMN__PMEVCNT_PAIRED(dtm_idx);
1288                 writel_relaxed(dtm->pmu_config_low, dtm->base + CMN_DTM_PMU_CONFIG);
1289         }
1290         memset(hw->dtm_idx, 0, sizeof(hw->dtm_idx));
1291
1292         for (i = 0; hw->dtcs_used & (1U << i); i++)
1293                 cmn->dtc[i].counters[hw->dtc_idx] = NULL;
1294 }
1295
1296 static int arm_cmn_event_add(struct perf_event *event, int flags)
1297 {
1298         struct arm_cmn *cmn = to_cmn(event->pmu);
1299         struct arm_cmn_hw_event *hw = to_cmn_hw(event);
1300         struct arm_cmn_dtc *dtc = &cmn->dtc[0];
1301         struct arm_cmn_node *dn;
1302         enum cmn_node_type type = CMN_EVENT_TYPE(event);
1303         unsigned int i, dtc_idx, input_sel;
1304
1305         if (type == CMN_TYPE_DTC) {
1306                 i = 0;
1307                 while (cmn->dtc[i].cycles)
1308                         if (++i == cmn->num_dtcs)
1309                                 return -ENOSPC;
1310
1311                 cmn->dtc[i].cycles = event;
1312                 hw->dtc_idx = CMN_DT_NUM_COUNTERS;
1313                 hw->dtcs_used = 1U << i;
1314
1315                 if (flags & PERF_EF_START)
1316                         arm_cmn_event_start(event, 0);
1317                 return 0;
1318         }
1319
1320         /* Grab a free global counter first... */
1321         dtc_idx = 0;
1322         while (dtc->counters[dtc_idx])
1323                 if (++dtc_idx == CMN_DT_NUM_COUNTERS)
1324                         return -ENOSPC;
1325
1326         hw->dtc_idx = dtc_idx;
1327
1328         /* ...then the local counters to feed it. */
1329         for_each_hw_dn(hw, dn, i) {
1330                 struct arm_cmn_dtm *dtm = &cmn->dtms[dn->dtm] + hw->dtm_offset;
1331                 unsigned int dtm_idx, shift;
1332                 u64 reg;
1333
1334                 dtm_idx = 0;
1335                 while (dtm->pmu_config_low & CMN__PMEVCNT_PAIRED(dtm_idx))
1336                         if (++dtm_idx == CMN_DTM_NUM_COUNTERS)
1337                                 goto free_dtms;
1338
1339                 if (type == CMN_TYPE_XP) {
1340                         input_sel = CMN__PMEVCNT0_INPUT_SEL_XP + dtm_idx;
1341                 } else if (type == CMN_TYPE_WP) {
1342                         int tmp, wp_idx = arm_cmn_wp_idx(event);
1343                         u32 cfg = arm_cmn_wp_config(event);
1344
1345                         if (dtm->wp_event[wp_idx] >= 0)
1346                                 goto free_dtms;
1347
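                             /*
                              * Watchpoints 0/1 (and 2/3) form a combinable
                              * pair: if the partner slot is already in use,
                              * its combine setting must match ours.
                              */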
1348                         tmp = dtm->wp_event[wp_idx ^ 1];
1349                         if (tmp >= 0 && CMN_EVENT_WP_COMBINE(event) !=
1350                                         CMN_EVENT_WP_COMBINE(dtc->counters[tmp]))
1351                                 goto free_dtms;
1352
1353                         input_sel = CMN__PMEVCNT0_INPUT_SEL_WP + wp_idx;
1354                         dtm->wp_event[wp_idx] = dtc_idx;
1355                         writel_relaxed(cfg, dtm->base + CMN_DTM_WPn_CONFIG(wp_idx));
1356                 } else {
1357                         struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id);
1358
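                             /*
                              * With multiple DTMs per XP, ports 0-1, 2-3 and
                              * 4-5 each have their own DTM, so the port number
                              * is reduced modulo 2 relative to that DTM before
                              * being encoded into the input mux selector along
                              * with the device position.
                              */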
1359                         if (cmn->multi_dtm)
1360                                 nid.port %= 2;
1361
1362                         input_sel = CMN__PMEVCNT0_INPUT_SEL_DEV + dtm_idx +
1363                                     (nid.port << 4) + (nid.dev << 2);
1364
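                             /*
                              * Occupancy-filtered events share the node's
                              * single occupid field (the upper half of the
                              * event selector), so concurrent events must
                              * agree on its value; refcount users so we know
                              * when it is free to change.
                              */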
1365                         if (arm_cmn_is_occup_event(cmn->model, type, CMN_EVENT_EVENTID(event))) {
1366                                 u8 occupid = CMN_EVENT_OCCUPID(event);
1367
1368                                 if (dn->occupid_count == 0) {
1369                                         dn->occupid_val = occupid;
1370                                         writel_relaxed(occupid,
1371                                                        dn->pmu_base + CMN_PMU_EVENT_SEL + 4);
1372                                 } else if (dn->occupid_val != occupid) {
1373                                         goto free_dtms;
1374                                 }
1375                                 dn->occupid_count++;
1376                         }
1377                 }
1378
1379                 arm_cmn_set_index(hw->dtm_idx, i, dtm_idx);
1380
1381                 dtm->input_sel[dtm_idx] = input_sel;
1382                 shift = CMN__PMEVCNTn_GLOBAL_NUM_SHIFT(dtm_idx);
1383                 dtm->pmu_config_low &= ~(CMN__PMEVCNT0_GLOBAL_NUM << shift);
1384                 dtm->pmu_config_low |= FIELD_PREP(CMN__PMEVCNT0_GLOBAL_NUM, dtc_idx) << shift;
1385                 dtm->pmu_config_low |= CMN__PMEVCNT_PAIRED(dtm_idx);
1386                 reg = (u64)le32_to_cpu(dtm->pmu_config_high) << 32 | dtm->pmu_config_low;
1387                 writeq_relaxed(reg, dtm->base + CMN_DTM_PMU_CONFIG);
1388         }
1389
1390         /* Go go go! */
1391         arm_cmn_init_counter(event);
1392
1393         if (flags & PERF_EF_START)
1394                 arm_cmn_event_start(event, 0);
1395
1396         return 0;
1397
1398 free_dtms:
1399         arm_cmn_event_clear(cmn, event, i);
1400         return -ENOSPC;
1401 }
1402
1403 static void arm_cmn_event_del(struct perf_event *event, int flags)
1404 {
1405         struct arm_cmn *cmn = to_cmn(event->pmu);
1406         struct arm_cmn_hw_event *hw = to_cmn_hw(event);
1407         enum cmn_node_type type = CMN_EVENT_TYPE(event);
1408
1409         arm_cmn_event_stop(event, PERF_EF_UPDATE);
1410
1411         if (type == CMN_TYPE_DTC)
1412                 cmn->dtc[__ffs(hw->dtcs_used)].cycles = NULL;
1413         else
1414                 arm_cmn_event_clear(cmn, event, hw->num_dns);
1415 }
1416
1417 /*
1418  * We stop the PMU for both add and read, to avoid skew across DTM counters.
1419  * In theory we could use snapshots to read without stopping, but then it
1420  * becomes a lot trickier to deal with overflow and racing against interrupts,
1421  * plus it seems they don't work properly on some hardware anyway :(
1422  */
1423 static void arm_cmn_start_txn(struct pmu *pmu, unsigned int flags)
1424 {
1425         arm_cmn_set_state(to_cmn(pmu), CMN_STATE_TXN);
1426 }
1427
1428 static void arm_cmn_end_txn(struct pmu *pmu)
1429 {
1430         arm_cmn_clear_state(to_cmn(pmu), CMN_STATE_TXN);
1431 }
1432
1433 static int arm_cmn_commit_txn(struct pmu *pmu)
1434 {
1435         arm_cmn_end_txn(pmu);
1436         return 0;
1437 }
1438
1439 static void arm_cmn_migrate(struct arm_cmn *cmn, unsigned int cpu)
1440 {
1441         unsigned int i;
1442
1443         perf_pmu_migrate_context(&cmn->pmu, cmn->cpu, cpu);
1444         for (i = 0; i < cmn->num_dtcs; i++)
1445                 irq_set_affinity(cmn->dtc[i].irq, cpumask_of(cpu));
1446         cmn->cpu = cpu;
1447 }
1448
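     /*
      * If a CPU on the CMN's own NUMA node comes online while we are bound
      * to a CPU elsewhere, migrate back home.
      */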
1449 static int arm_cmn_pmu_online_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
1450 {
1451         struct arm_cmn *cmn;
1452         int node;
1453
1454         cmn = hlist_entry_safe(cpuhp_node, struct arm_cmn, cpuhp_node);
1455         node = dev_to_node(cmn->dev);
1456         if (node != NUMA_NO_NODE && cpu_to_node(cmn->cpu) != node && cpu_to_node(cpu) == node)
1457                 arm_cmn_migrate(cmn, cpu);
1458         return 0;
1459 }
1460
1461 static int arm_cmn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
1462 {
1463         struct arm_cmn *cmn;
1464         unsigned int target;
1465         int node;
1466         cpumask_t mask;
1467
1468         cmn = hlist_entry_safe(cpuhp_node, struct arm_cmn, cpuhp_node);
1469         if (cpu != cmn->cpu)
1470                 return 0;
1471
1472         node = dev_to_node(cmn->dev);
1473         if (cpumask_and(&mask, cpumask_of_node(node), cpu_online_mask) &&
1474             cpumask_andnot(&mask, &mask, cpumask_of(cpu)))
1475                 target = cpumask_any(&mask);
1476         else
1477                 target = cpumask_any_but(cpu_online_mask, cpu);
1478         if (target < nr_cpu_ids)
1479                 arm_cmn_migrate(cmn, target);
1480         return 0;
1481 }
1482
1483 static irqreturn_t arm_cmn_handle_irq(int irq, void *dev_id)
1484 {
1485         struct arm_cmn_dtc *dtc = dev_id;
1486         irqreturn_t ret = IRQ_NONE;
1487
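             /*
              * Service this DTC, then follow the irq_friend offsets to any
              * further DTCs sharing the same IRQ line (see
              * arm_cmn_init_irqs()).
              */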
1488         for (;;) {
1489                 u32 status = readl_relaxed(dtc->base + CMN_DT_PMOVSR);
1490                 u64 delta;
1491                 int i;
1492
1493                 for (i = 0; i < CMN_DT_NUM_COUNTERS; i++) {
1494                         if (status & (1U << i)) {
1495                                 ret = IRQ_HANDLED;
1496                                 if (WARN_ON(!dtc->counters[i]))
1497                                         continue;
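                                     /*
                                      * The global counter accumulates the
                                      * upper bits above the 16-bit DTM
                                      * counter, hence the shift when folding
                                      * it into the 64-bit perf count.
                                      */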
1498                                 delta = (u64)arm_cmn_read_counter(dtc, i) << 16;
1499                                 local64_add(delta, &dtc->counters[i]->count);
1500                         }
1501                 }
1502
1503                 if (status & (1U << CMN_DT_NUM_COUNTERS)) {
1504                         ret = IRQ_HANDLED;
1505                         if (dtc->cc_active && !WARN_ON(!dtc->cycles)) {
1506                                 delta = arm_cmn_read_cc(dtc);
1507                                 local64_add(delta, &dtc->cycles->count);
1508                         }
1509                 }
1510
1511                 writel_relaxed(status, dtc->base + CMN_DT_PMOVSR_CLR);
1512
1513                 if (!dtc->irq_friend)
1514                         return ret;
1515                 dtc += dtc->irq_friend;
1516         }
1517 }
1518
1519 /* We can reasonably accommodate DTCs of the same CMN sharing IRQs */
1520 static int arm_cmn_init_irqs(struct arm_cmn *cmn)
1521 {
1522         int i, j, irq, err;
1523
1524         for (i = 0; i < cmn->num_dtcs; i++) {
1525                 irq = cmn->dtc[i].irq;
1526                 for (j = i; j--; ) {
1527                         if (cmn->dtc[j].irq == irq) {
1528                                 cmn->dtc[j].irq_friend = i - j;
1529                                 goto next;
1530                         }
1531                 }
1532                 err = devm_request_irq(cmn->dev, irq, arm_cmn_handle_irq,
1533                                        IRQF_NOBALANCING | IRQF_NO_THREAD,
1534                                        dev_name(cmn->dev), &cmn->dtc[i]);
1535                 if (err)
1536                         return err;
1537
1538                 err = irq_set_affinity(irq, cpumask_of(cmn->cpu));
1539                 if (err)
1540                         return err;
1541         next:
1542                 ; /* isn't C great? */
1543         }
1544         return 0;
1545 }
1546
1547 static void arm_cmn_init_dtm(struct arm_cmn_dtm *dtm, struct arm_cmn_node *xp, int idx)
1548 {
1549         int i;
1550
1551         dtm->base = xp->pmu_base + CMN_DTM_OFFSET(idx);
1552         dtm->pmu_config_low = CMN_DTM_PMU_CONFIG_PMU_EN;
1553         for (i = 0; i < 4; i++) {
1554                 dtm->wp_event[i] = -1;
1555                 writeq_relaxed(0, dtm->base + CMN_DTM_WPn_MASK(i));
1556                 writeq_relaxed(~0ULL, dtm->base + CMN_DTM_WPn_VAL(i));
1557         }
1558 }
1559
1560 static int arm_cmn_init_dtc(struct arm_cmn *cmn, struct arm_cmn_node *dn, int idx)
1561 {
1562         struct arm_cmn_dtc *dtc = cmn->dtc + idx;
1563
1564         dtc->base = dn->pmu_base - CMN_PMU_OFFSET;
1565         dtc->irq = platform_get_irq(to_platform_device(cmn->dev), idx);
1566         if (dtc->irq < 0)
1567                 return dtc->irq;
1568
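             /*
              * Start from a clean slate: reset the PMU control register,
              * clear any stale overflow status (event counters plus the
              * cycle counter), then enable the overflow interrupt.
              */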
1569         writel_relaxed(0, dtc->base + CMN_DT_PMCR);
1570         writel_relaxed(0x1ff, dtc->base + CMN_DT_PMOVSR_CLR);
1571         writel_relaxed(CMN_DT_PMCR_OVFL_INTR_EN, dtc->base + CMN_DT_PMCR);
1572
1573         return 0;
1574 }
1575
1576 static int arm_cmn_node_cmp(const void *a, const void *b)
1577 {
1578         const struct arm_cmn_node *dna = a, *dnb = b;
1579         int cmp;
1580
1581         cmp = dna->type - dnb->type;
1582         if (!cmp)
1583                 cmp = dna->logid - dnb->logid;
1584         return cmp;
1585 }
1586
1587 static int arm_cmn_init_dtcs(struct arm_cmn *cmn)
1588 {
1589         struct arm_cmn_node *dn, *xp;
1590         int dtc_idx = 0;
1591         u8 dtcs_present = (1 << cmn->num_dtcs) - 1;
1592
1593         cmn->dtc = devm_kcalloc(cmn->dev, cmn->num_dtcs, sizeof(cmn->dtc[0]), GFP_KERNEL);
1594         if (!cmn->dtc)
1595                 return -ENOMEM;
1596
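             /*
              * Sort nodes by type then logical ID, so that each type forms a
              * contiguous, ordered run for easy per-type lookup.
              */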
1597         sort(cmn->dns, cmn->num_dns, sizeof(cmn->dns[0]), arm_cmn_node_cmp, NULL);
1598
1599         cmn->xps = arm_cmn_node(cmn, CMN_TYPE_XP);
1600
1601         for (dn = cmn->dns; dn->type; dn++) {
1602                 if (dn->type == CMN_TYPE_XP) {
1603                         dn->dtc &= dtcs_present;
1604                         continue;
1605                 }
1606
1607                 xp = arm_cmn_node_to_xp(cmn, dn);
1608                 dn->dtm = xp->dtm;
1609                 if (cmn->multi_dtm)
1610                         dn->dtm += arm_cmn_nid(cmn, dn->id).port / 2;
1611
1612                 if (dn->type == CMN_TYPE_DTC) {
1613                         int err;
1614                         /* We do at least know that a DTC's XP must be in that DTC's domain */
1615                         if (xp->dtc == 0xf)
1616                                 xp->dtc = 1 << dtc_idx;
1617                         err = arm_cmn_init_dtc(cmn, dn, dtc_idx++);
1618                         if (err)
1619                                 return err;
1620                 }
1621
1622                 /* To the PMU, RN-Ds don't add anything over RN-Is, so smoosh them together */
1623                 if (dn->type == CMN_TYPE_RND)
1624                         dn->type = CMN_TYPE_RNI;
1625         }
1626
1627         writel_relaxed(CMN_DT_DTC_CTL_DT_EN, cmn->dtc[0].base + CMN_DT_DTC_CTL);
1628
1629         return 0;
1630 }
1631
1632 static void arm_cmn_init_node_info(struct arm_cmn *cmn, u32 offset, struct arm_cmn_node *node)
1633 {
1634         int level;
1635         u64 reg = readq_relaxed(cmn->base + offset + CMN_NODE_INFO);
1636
1637         node->type = FIELD_GET(CMN_NI_NODE_TYPE, reg);
1638         node->id = FIELD_GET(CMN_NI_NODE_ID, reg);
1639         node->logid = FIELD_GET(CMN_NI_LOGICAL_ID, reg);
1640
1641         node->pmu_base = cmn->base + offset + CMN_PMU_OFFSET;
1642
1643         if (node->type == CMN_TYPE_CFG)
1644                 level = 0;
1645         else if (node->type == CMN_TYPE_XP)
1646                 level = 1;
1647         else
1648                 level = 2;
1649
1650         dev_dbg(cmn->dev, "node%*c%#06hx%*ctype:%-#6x id:%-4hd off:%#x\n",
1651                         (level * 2) + 1, ' ', node->id, 5 - (level * 2), ' ',
1652                         node->type, node->logid, offset);
1653 }
1654
1655 static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
1656 {
1657         void __iomem *cfg_region;
1658         struct arm_cmn_node cfg, *dn;
1659         struct arm_cmn_dtm *dtm;
1660         u16 child_count, child_poff;
1661         u32 xp_offset[CMN_MAX_XPS];
1662         u64 reg;
1663         int i, j;
1664         size_t sz;
1665
1666         arm_cmn_init_node_info(cmn, rgn_offset, &cfg);
1667         if (cfg.type != CMN_TYPE_CFG)
1668                 return -ENODEV;
1669
1670         cfg_region = cmn->base + rgn_offset;
1671         reg = readl_relaxed(cfg_region + CMN_CFGM_PERIPH_ID_2);
1672         cmn->rev = FIELD_GET(CMN_CFGM_PID2_REVISION, reg);
1673
1674         reg = readq_relaxed(cfg_region + CMN_CFGM_INFO_GLOBAL);
1675         cmn->multi_dtm = reg & CMN_INFO_MULTIPLE_DTM_EN;
1676         cmn->rsp_vc_num = FIELD_GET(CMN_INFO_RSP_VC_NUM, reg);
1677         cmn->dat_vc_num = FIELD_GET(CMN_INFO_DAT_VC_NUM, reg);
1678
1679         reg = readq_relaxed(cfg_region + CMN_CHILD_INFO);
1680         child_count = FIELD_GET(CMN_CI_CHILD_COUNT, reg);
1681         child_poff = FIELD_GET(CMN_CI_CHILD_PTR_OFFSET, reg);
1682
1683         cmn->num_xps = child_count;
1684         cmn->num_dns = cmn->num_xps;
1685
1686         /* Pass 1: visit the XPs, enumerate their children */
1687         for (i = 0; i < cmn->num_xps; i++) {
1688                 reg = readq_relaxed(cfg_region + child_poff + i * 8);
1689                 xp_offset[i] = reg & CMN_CHILD_NODE_ADDR;
1690
1691                 reg = readq_relaxed(cmn->base + xp_offset[i] + CMN_CHILD_INFO);
1692                 cmn->num_dns += FIELD_GET(CMN_CI_CHILD_COUNT, reg);
1693         }
1694
1695         /* Cheeky +1 to help terminate pointer-based iteration later */
1696         dn = devm_kcalloc(cmn->dev, cmn->num_dns + 1, sizeof(*dn), GFP_KERNEL);
1697         if (!dn)
1698                 return -ENOMEM;
1699
1700         /* Initial safe upper bound on DTMs for any possible mesh layout */
1701         i = cmn->num_xps;
1702         if (cmn->multi_dtm)
1703                 i += cmn->num_xps + 1;
1704         dtm = devm_kcalloc(cmn->dev, i, sizeof(*dtm), GFP_KERNEL);
1705         if (!dtm)
1706                 return -ENOMEM;
1707
1708         /* Pass 2: now we can actually populate the nodes */
1709         cmn->dns = dn;
1710         cmn->dtms = dtm;
1711         for (i = 0; i < cmn->num_xps; i++) {
1712                 void __iomem *xp_region = cmn->base + xp_offset[i];
1713                 struct arm_cmn_node *xp = dn++;
1714                 unsigned int xp_ports = 0;
1715
1716                 arm_cmn_init_node_info(cmn, xp_offset[i], xp);
1717                 /*
1718                  * Thanks to the order in which XP logical IDs seem to be
1719                  * assigned, we can handily infer the mesh X dimension by
1720                  * looking out for the XP at (0,1) without needing to know
1721                  * the exact node ID format, which we can later derive.
1722                  */
1723                 if (xp->id == (1 << 3))
1724                         cmn->mesh_x = xp->logid;
1725
1726                 if (cmn->model == CMN600)
1727                         xp->dtc = 0xf;
1728                 else
1729                         xp->dtc = 1 << readl_relaxed(xp_region + CMN_DTM_UNIT_INFO);
1730
1731                 xp->dtm = dtm - cmn->dtms;
1732                 arm_cmn_init_dtm(dtm++, xp, 0);
1733                 /*
1734                  * Keeping track of connected ports will let us filter out
1735                  * unnecessary XP events easily. We can also reliably infer the
1736                  * "extra device ports" configuration for the node ID format
1737                  * from this, since in that case we will see at least one XP
1738                  * with port 2 connected, for the HN-D.
1739                  */
1740                 if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P0))
1741                         xp_ports |= BIT(0);
1742                 if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P1))
1743                         xp_ports |= BIT(1);
1744                 if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P2))
1745                         xp_ports |= BIT(2);
1746                 if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P3))
1747                         xp_ports |= BIT(3);
1748                 if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P4))
1749                         xp_ports |= BIT(4);
1750                 if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P5))
1751                         xp_ports |= BIT(5);
1752
1753                 if (cmn->multi_dtm && (xp_ports & 0xc))
1754                         arm_cmn_init_dtm(dtm++, xp, 1);
1755                 if (cmn->multi_dtm && (xp_ports & 0x30))
1756                         arm_cmn_init_dtm(dtm++, xp, 2);
1757
1758                 cmn->ports_used |= xp_ports;
1759
1760                 reg = readq_relaxed(xp_region + CMN_CHILD_INFO);
1761                 child_count = FIELD_GET(CMN_CI_CHILD_COUNT, reg);
1762                 child_poff = FIELD_GET(CMN_CI_CHILD_PTR_OFFSET, reg);
1763
1764                 for (j = 0; j < child_count; j++) {
1765                         reg = readq_relaxed(xp_region + child_poff + j * 8);
1766                         /*
1767                          * Don't even try to touch anything external, since in general
1768                          * we haven't a clue how to power up arbitrary CHI requesters.
1769                          * As of CMN-600r1 these could only be RN-SAMs or CXLAs,
1770                          * neither of which have any PMU events anyway.
1771                          * (Actually, CXLAs do seem to have grown some events in r1p2,
1772                          * but they don't go to regular XP DTMs, and they depend on
1773                          * secure configuration which we can't easily deal with)
1774                          */
1775                         if (reg & CMN_CHILD_NODE_EXTERNAL) {
1776                                 dev_dbg(cmn->dev, "ignoring external node %llx\n", reg);
1777                                 continue;
1778                         }
1779
1780                         arm_cmn_init_node_info(cmn, reg & CMN_CHILD_NODE_ADDR, dn);
1781
1782                         switch (dn->type) {
1783                         case CMN_TYPE_DTC:
1784                                 cmn->num_dtcs++;
1785                                 dn++;
1786                                 break;
1787                         /* These guys have PMU events */
1788                         case CMN_TYPE_DVM:
1789                         case CMN_TYPE_HNI:
1790                         case CMN_TYPE_HNF:
1791                         case CMN_TYPE_SBSX:
1792                         case CMN_TYPE_RNI:
1793                         case CMN_TYPE_RND:
1794                         case CMN_TYPE_MTSX:
1795                         case CMN_TYPE_CXRA:
1796                         case CMN_TYPE_CXHA:
1797                                 dn++;
1798                                 break;
1799                         /* Nothing to see here */
1800                         case CMN_TYPE_MPAM_S:
1801                         case CMN_TYPE_MPAM_NS:
1802                         case CMN_TYPE_RNSAM:
1803                         case CMN_TYPE_CXLA:
1804                                 break;
1805                         /* Something has gone horribly wrong */
1806                         default:
1807                                 dev_err(cmn->dev, "invalid device node type: 0x%x\n", dn->type);
1808                                 return -ENODEV;
1809                         }
1810                 }
1811         }
1812
1813         /* Correct for any nodes we skipped */
1814         cmn->num_dns = dn - cmn->dns;
1815
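             /*
              * Trim the arrays down to what was actually used; if a
              * (shrinking) realloc fails, carrying on with the original
              * over-sized allocations is perfectly fine.
              */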
1816         sz = (void *)(dn + 1) - (void *)cmn->dns;
1817         dn = devm_krealloc(cmn->dev, cmn->dns, sz, GFP_KERNEL);
1818         if (dn)
1819                 cmn->dns = dn;
1820
1821         sz = (void *)dtm - (void *)cmn->dtms;
1822         dtm = devm_krealloc(cmn->dev, cmn->dtms, sz, GFP_KERNEL);
1823         if (dtm)
1824                 cmn->dtms = dtm;
1825
1826         /*
1827          * If mesh_x wasn't set during discovery then we never saw
1828          * an XP at (0,1), thus we must have an Nx1 configuration.
1829          */
1830         if (!cmn->mesh_x)
1831                 cmn->mesh_x = cmn->num_xps;
1832         cmn->mesh_y = cmn->num_xps / cmn->mesh_x;
1833
1834         /* 1x1 config plays havoc with XP event encodings */
1835         if (cmn->num_xps == 1)
1836                 dev_warn(cmn->dev, "1x1 config not fully supported, translate XP events manually\n");
1837
1838         dev_dbg(cmn->dev, "model %d, periph_id_2 revision %d\n", cmn->model, cmn->rev);
1839         reg = cmn->ports_used;
1840         dev_dbg(cmn->dev, "mesh %dx%d, ID width %d, ports %6pbl%s\n",
1841                 cmn->mesh_x, cmn->mesh_y, arm_cmn_xyidbits(cmn), &reg,
1842                 cmn->multi_dtm ? ", multi-DTM" : "");
1843
1844         return 0;
1845 }
1846
1847 static int arm_cmn600_acpi_probe(struct platform_device *pdev, struct arm_cmn *cmn)
1848 {
1849         struct resource *cfg, *root;
1850
1851         cfg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1852         if (!cfg)
1853                 return -EINVAL;
1854
1855         root = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1856         if (!root)
1857                 return -EINVAL;
1858
1859         if (!resource_contains(cfg, root))
1860                 swap(cfg, root);
1861         /*
1862          * Note that devm_ioremap_resource() is dumb and won't let the platform
1863          * device claim cfg when the ACPI companion device has already claimed
1864          * root within it. But since they *are* already both claimed in the
1865          * appropriate name, we don't really need to do it again here anyway.
1866          */
1867         cmn->base = devm_ioremap(cmn->dev, cfg->start, resource_size(cfg));
1868         if (!cmn->base)
1869                 return -ENOMEM;
1870
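             /* On success, return the root node's offset within the mapped region */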
1871         return root->start - cfg->start;
1872 }
1873
1874 static int arm_cmn600_of_probe(struct device_node *np)
1875 {
1876         u32 rootnode;
1877
1878         return of_property_read_u32(np, "arm,root-node", &rootnode) ?: rootnode;
1879 }
1880
1881 static int arm_cmn_probe(struct platform_device *pdev)
1882 {
1883         struct arm_cmn *cmn;
1884         const char *name;
1885         static atomic_t id;
1886         int err, rootnode, this_id;
1887
1888         cmn = devm_kzalloc(&pdev->dev, sizeof(*cmn), GFP_KERNEL);
1889         if (!cmn)
1890                 return -ENOMEM;
1891
1892         cmn->dev = &pdev->dev;
1893         cmn->model = (unsigned long)device_get_match_data(cmn->dev);
1894         platform_set_drvdata(pdev, cmn);
1895
1896         if (cmn->model == CMN600 && has_acpi_companion(cmn->dev)) {
1897                 rootnode = arm_cmn600_acpi_probe(pdev, cmn);
1898         } else {
1899                 rootnode = 0;
1900                 cmn->base = devm_platform_ioremap_resource(pdev, 0);
1901                 if (IS_ERR(cmn->base))
1902                         return PTR_ERR(cmn->base);
1903                 if (cmn->model == CMN600)
1904                         rootnode = arm_cmn600_of_probe(pdev->dev.of_node);
1905         }
1906         if (rootnode < 0)
1907                 return rootnode;
1908
1909         err = arm_cmn_discover(cmn, rootnode);
1910         if (err)
1911                 return err;
1912
1913         err = arm_cmn_init_dtcs(cmn);
1914         if (err)
1915                 return err;
1916
1917         err = arm_cmn_init_irqs(cmn);
1918         if (err)
1919                 return err;
1920
1921         cmn->cpu = cpumask_local_spread(0, dev_to_node(cmn->dev));
1922         cmn->pmu = (struct pmu) {
1923                 .module = THIS_MODULE,
1924                 .attr_groups = arm_cmn_attr_groups,
1925                 .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
1926                 .task_ctx_nr = perf_invalid_context,
1927                 .pmu_enable = arm_cmn_pmu_enable,
1928                 .pmu_disable = arm_cmn_pmu_disable,
1929                 .event_init = arm_cmn_event_init,
1930                 .add = arm_cmn_event_add,
1931                 .del = arm_cmn_event_del,
1932                 .start = arm_cmn_event_start,
1933                 .stop = arm_cmn_event_stop,
1934                 .read = arm_cmn_event_read,
1935                 .start_txn = arm_cmn_start_txn,
1936                 .commit_txn = arm_cmn_commit_txn,
1937                 .cancel_txn = arm_cmn_end_txn,
1938         };
1939
1940         this_id = atomic_fetch_inc(&id);
1941         name = devm_kasprintf(cmn->dev, GFP_KERNEL, "arm_cmn_%d", this_id);
1942         if (!name)
1943                 return -ENOMEM;
1944
1945         err = cpuhp_state_add_instance(arm_cmn_hp_state, &cmn->cpuhp_node);
1946         if (err)
1947                 return err;
1948
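             /*
              * The PMU shows up in userspace as arm_cmn_<n>; events can then
              * be counted with something like
              * "perf stat -e arm_cmn_0/<event>/" using the event names
              * exposed by the attr groups above.
              */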
1949         err = perf_pmu_register(&cmn->pmu, name, -1);
1950         if (err)
1951                 cpuhp_state_remove_instance_nocalls(arm_cmn_hp_state, &cmn->cpuhp_node);
1952         else
1953                 arm_cmn_debugfs_init(cmn, this_id);
1954
1955         return err;
1956 }
1957
1958 static int arm_cmn_remove(struct platform_device *pdev)
1959 {
1960         struct arm_cmn *cmn = platform_get_drvdata(pdev);
1961
1962         writel_relaxed(0, cmn->dtc[0].base + CMN_DT_DTC_CTL);
1963
1964         perf_pmu_unregister(&cmn->pmu);
1965         cpuhp_state_remove_instance_nocalls(arm_cmn_hp_state, &cmn->cpuhp_node);
1966         debugfs_remove(cmn->debug);
1967         return 0;
1968 }
1969
1970 #ifdef CONFIG_OF
1971 static const struct of_device_id arm_cmn_of_match[] = {
1972         { .compatible = "arm,cmn-600", .data = (void *)CMN600 },
1973         { .compatible = "arm,ci-700", .data = (void *)CI700 },
1974         {}
1975 };
1976 MODULE_DEVICE_TABLE(of, arm_cmn_of_match);
1977 #endif
1978
1979 #ifdef CONFIG_ACPI
1980 static const struct acpi_device_id arm_cmn_acpi_match[] = {
1981         { "ARMHC600", CMN600 },
1982         {}
1983 };
1984 MODULE_DEVICE_TABLE(acpi, arm_cmn_acpi_match);
1985 #endif
1986
1987 static struct platform_driver arm_cmn_driver = {
1988         .driver = {
1989                 .name = "arm-cmn",
1990                 .of_match_table = of_match_ptr(arm_cmn_of_match),
1991                 .acpi_match_table = ACPI_PTR(arm_cmn_acpi_match),
1992         },
1993         .probe = arm_cmn_probe,
1994         .remove = arm_cmn_remove,
1995 };
1996
1997 static int __init arm_cmn_init(void)
1998 {
1999         int ret;
2000
2001         ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
2002                                       "perf/arm/cmn:online",
2003                                       arm_cmn_pmu_online_cpu,
2004                                       arm_cmn_pmu_offline_cpu);
2005         if (ret < 0)
2006                 return ret;
2007
2008         arm_cmn_hp_state = ret;
2009         arm_cmn_debugfs = debugfs_create_dir("arm-cmn", NULL);
2010
2011         ret = platform_driver_register(&arm_cmn_driver);
2012         if (ret) {
2013                 cpuhp_remove_multi_state(arm_cmn_hp_state);
2014                 debugfs_remove(arm_cmn_debugfs);
2015         }
2016         return ret;
2017 }
2018
2019 static void __exit arm_cmn_exit(void)
2020 {
2021         platform_driver_unregister(&arm_cmn_driver);
2022         cpuhp_remove_multi_state(arm_cmn_hp_state);
2023         debugfs_remove(arm_cmn_debugfs);
2024 }
2025
2026 module_init(arm_cmn_init);
2027 module_exit(arm_cmn_exit);
2028
2029 MODULE_AUTHOR("Robin Murphy <robin.murphy@arm.com>");
2030 MODULE_DESCRIPTION("Arm CMN-600 PMU driver");
2031 MODULE_LICENSE("GPL v2");