// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARMv8 PMUv3 Performance Events handling code.
 *
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This code is based heavily on the ARMv7 perf event code.
 */

#include <asm/irq_regs.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

#include <linux/acpi.h>
#include <linux/clocksource.h>
#include <linux/kvm_host.h>
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/smp.h>
/* ARMv8 Cortex-A53 specific event types. */
#define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2

/* ARMv8 Cavium ThunderX specific event types. */
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST 0xE9
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS 0xEA
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS 0xEB
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS 0xEC
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS 0xED
/*
 * ARMv8 architecturally defined events. Not all of these may be
 * supported on any given implementation; unsupported events are
 * disabled at run-time based on the PMCEID registers.
 */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INST_RETIRED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV8_PMUV3_PERFCTR_STALL_FRONTEND,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV8_PMUV3_PERFCTR_STALL_BACKEND,
};
static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL,
	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_TLB,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL,
	[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
};
static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREF_LINEFILL,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};
static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};
static const unsigned armv8_a73_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
};
static const unsigned armv8_thunder_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						  [PERF_COUNT_HW_CACHE_OP_MAX]
						  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS,

	[C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS,
	[C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS,

	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
};
static const unsigned armv8_vulcan_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						 [PERF_COUNT_HW_CACHE_OP_MAX]
						 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,

	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};
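
/*
 * Print the architectural event number behind each named event exposed
 * under the PMU's sysfs "events" directory.
 */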
static ssize_t
armv8pmu_events_sysfs_show(struct device *dev,
			   struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);

	return sprintf(page, "event=0x%03llx\n", pmu_attr->id);
}
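
/* Define a named sysfs event attribute bound to an architectural event number. */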
#define ARMV8_EVENT_ATTR(name, config) \
	(&((struct perf_pmu_events_attr) { \
		.attr = __ATTR(name, 0444, armv8pmu_events_sysfs_show, NULL), \
		.id = config, \
	}).attr.attr)
static struct attribute *armv8_pmuv3_event_attrs[] = {
	ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR),
	ARMV8_EVENT_ATTR(l1i_cache_refill, ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL),
	ARMV8_EVENT_ATTR(l1i_tlb_refill, ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL),
	ARMV8_EVENT_ATTR(l1d_cache_refill, ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL),
	ARMV8_EVENT_ATTR(l1d_cache, ARMV8_PMUV3_PERFCTR_L1D_CACHE),
	ARMV8_EVENT_ATTR(l1d_tlb_refill, ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL),
	ARMV8_EVENT_ATTR(ld_retired, ARMV8_PMUV3_PERFCTR_LD_RETIRED),
	ARMV8_EVENT_ATTR(st_retired, ARMV8_PMUV3_PERFCTR_ST_RETIRED),
	ARMV8_EVENT_ATTR(inst_retired, ARMV8_PMUV3_PERFCTR_INST_RETIRED),
	ARMV8_EVENT_ATTR(exc_taken, ARMV8_PMUV3_PERFCTR_EXC_TAKEN),
	ARMV8_EVENT_ATTR(exc_return, ARMV8_PMUV3_PERFCTR_EXC_RETURN),
	ARMV8_EVENT_ATTR(cid_write_retired, ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED),
	ARMV8_EVENT_ATTR(pc_write_retired, ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED),
	ARMV8_EVENT_ATTR(br_immed_retired, ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED),
	ARMV8_EVENT_ATTR(br_return_retired, ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED),
	ARMV8_EVENT_ATTR(unaligned_ldst_retired, ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED),
	ARMV8_EVENT_ATTR(br_mis_pred, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED),
	ARMV8_EVENT_ATTR(cpu_cycles, ARMV8_PMUV3_PERFCTR_CPU_CYCLES),
	ARMV8_EVENT_ATTR(br_pred, ARMV8_PMUV3_PERFCTR_BR_PRED),
	ARMV8_EVENT_ATTR(mem_access, ARMV8_PMUV3_PERFCTR_MEM_ACCESS),
	ARMV8_EVENT_ATTR(l1i_cache, ARMV8_PMUV3_PERFCTR_L1I_CACHE),
	ARMV8_EVENT_ATTR(l1d_cache_wb, ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB),
	ARMV8_EVENT_ATTR(l2d_cache, ARMV8_PMUV3_PERFCTR_L2D_CACHE),
	ARMV8_EVENT_ATTR(l2d_cache_refill, ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL),
	ARMV8_EVENT_ATTR(l2d_cache_wb, ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB),
	ARMV8_EVENT_ATTR(bus_access, ARMV8_PMUV3_PERFCTR_BUS_ACCESS),
	ARMV8_EVENT_ATTR(memory_error, ARMV8_PMUV3_PERFCTR_MEMORY_ERROR),
	ARMV8_EVENT_ATTR(inst_spec, ARMV8_PMUV3_PERFCTR_INST_SPEC),
	ARMV8_EVENT_ATTR(ttbr_write_retired, ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED),
	ARMV8_EVENT_ATTR(bus_cycles, ARMV8_PMUV3_PERFCTR_BUS_CYCLES),
	/* Don't expose the chain event in /sys, since it's useless in isolation */
	ARMV8_EVENT_ATTR(l1d_cache_allocate, ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE),
	ARMV8_EVENT_ATTR(l2d_cache_allocate, ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE),
	ARMV8_EVENT_ATTR(br_retired, ARMV8_PMUV3_PERFCTR_BR_RETIRED),
	ARMV8_EVENT_ATTR(br_mis_pred_retired, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED),
	ARMV8_EVENT_ATTR(stall_frontend, ARMV8_PMUV3_PERFCTR_STALL_FRONTEND),
	ARMV8_EVENT_ATTR(stall_backend, ARMV8_PMUV3_PERFCTR_STALL_BACKEND),
	ARMV8_EVENT_ATTR(l1d_tlb, ARMV8_PMUV3_PERFCTR_L1D_TLB),
	ARMV8_EVENT_ATTR(l1i_tlb, ARMV8_PMUV3_PERFCTR_L1I_TLB),
	ARMV8_EVENT_ATTR(l2i_cache, ARMV8_PMUV3_PERFCTR_L2I_CACHE),
	ARMV8_EVENT_ATTR(l2i_cache_refill, ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL),
	ARMV8_EVENT_ATTR(l3d_cache_allocate, ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE),
	ARMV8_EVENT_ATTR(l3d_cache_refill, ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL),
	ARMV8_EVENT_ATTR(l3d_cache, ARMV8_PMUV3_PERFCTR_L3D_CACHE),
	ARMV8_EVENT_ATTR(l3d_cache_wb, ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB),
	ARMV8_EVENT_ATTR(l2d_tlb_refill, ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL),
	ARMV8_EVENT_ATTR(l2i_tlb_refill, ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL),
	ARMV8_EVENT_ATTR(l2d_tlb, ARMV8_PMUV3_PERFCTR_L2D_TLB),
	ARMV8_EVENT_ATTR(l2i_tlb, ARMV8_PMUV3_PERFCTR_L2I_TLB),
	ARMV8_EVENT_ATTR(remote_access, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS),
	ARMV8_EVENT_ATTR(ll_cache, ARMV8_PMUV3_PERFCTR_LL_CACHE),
	ARMV8_EVENT_ATTR(ll_cache_miss, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS),
	ARMV8_EVENT_ATTR(dtlb_walk, ARMV8_PMUV3_PERFCTR_DTLB_WALK),
	ARMV8_EVENT_ATTR(itlb_walk, ARMV8_PMUV3_PERFCTR_ITLB_WALK),
	ARMV8_EVENT_ATTR(ll_cache_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_RD),
	ARMV8_EVENT_ATTR(ll_cache_miss_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD),
	ARMV8_EVENT_ATTR(remote_access_rd, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD),
	ARMV8_EVENT_ATTR(sample_pop, ARMV8_SPE_PERFCTR_SAMPLE_POP),
	ARMV8_EVENT_ATTR(sample_feed, ARMV8_SPE_PERFCTR_SAMPLE_FEED),
	ARMV8_EVENT_ATTR(sample_filtrate, ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE),
	ARMV8_EVENT_ATTR(sample_collision, ARMV8_SPE_PERFCTR_SAMPLE_COLLISION),
	NULL,
};
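
/*
 * Only expose an event in sysfs if the PMU advertises it in PMCEID0/1,
 * checking the common event range first and then the extended range.
 */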
static umode_t
armv8pmu_event_attr_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pmu *pmu = dev_get_drvdata(dev);
	struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);

	if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
	    test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap))
		return attr->mode;

	pmu_attr->id -= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE;
	if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
	    test_bit(pmu_attr->id, cpu_pmu->pmceid_ext_bitmap))
		return attr->mode;

	return 0;
}
static struct attribute_group armv8_pmuv3_events_attr_group = {
	.name = "events",
	.attrs = armv8_pmuv3_event_attrs,
	.is_visible = armv8pmu_event_attr_is_visible,
};
PMU_FORMAT_ATTR(event, "config:0-15");
PMU_FORMAT_ATTR(long, "config1:0");
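
/* Bit 0 of config1 (the "long" format field above) requests a 64-bit counter. */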
static inline bool armv8pmu_event_is_64bit(struct perf_event *event)
{
	return event->attr.config1 & 0x1;
}
static struct attribute *armv8_pmuv3_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_long.attr,
	NULL,
};

static struct attribute_group armv8_pmuv3_format_attr_group = {
	.name = "format",
	.attrs = armv8_pmuv3_format_attrs,
};
/*
 * Perf Events' indices
 */
#define ARMV8_IDX_CYCLE_COUNTER 0
#define ARMV8_IDX_COUNTER0 1
#define ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
	(ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
/*
 * We must chain two programmable counters for 64-bit events,
 * except when we have allocated the 64-bit cycle counter (for the
 * CPU cycles event). This must be called only when the event has
 * a counter allocated.
 */
static inline bool armv8pmu_event_is_chained(struct perf_event *event)
{
	int idx = event->hw.idx;

	return !WARN_ON(idx < 0) &&
	       armv8pmu_event_is_64bit(event) &&
	       (idx != ARMV8_IDX_CYCLE_COUNTER);
}
/*
 * ARMv8 low level PMU access
 */

/*
 * Perf Event to low level counters mapping
 */
#define ARMV8_IDX_TO_COUNTER(x) \
	(((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
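
/*
 * PMCR_EL0 is the global control register; among other things, its E bit
 * enables or disables all counters at once.
 */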
static inline u32 armv8pmu_pmcr_read(void)
{
	return read_sysreg(pmcr_el0);
}

static inline void armv8pmu_pmcr_write(u32 val)
{
	val &= ARMV8_PMU_PMCR_MASK;
	isb();
	write_sysreg(val, pmcr_el0);
}

static inline int armv8pmu_has_overflowed(u32 pmovsr)
{
	return pmovsr & ARMV8_PMU_OVERFLOWED_MASK;
}
static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx)
{
	return idx >= ARMV8_IDX_CYCLE_COUNTER &&
	       idx <= ARMV8_IDX_COUNTER_LAST(cpu_pmu);
}

static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
{
	return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
}
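
/*
 * Event counters are accessed indirectly: write the counter index to
 * PMSELR_EL0, then read/write PMXEVCNTR_EL0 and PMXEVTYPER_EL0.
 */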
static inline void armv8pmu_select_counter(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	write_sysreg(counter, pmselr_el0);
	isb();
}
static inline u32 armv8pmu_read_evcntr(int idx)
{
	armv8pmu_select_counter(idx);
	return read_sysreg(pmxevcntr_el0);
}
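
/*
 * For a chained event the odd counter (idx) holds the high 32 bits and
 * the preceding even counter (idx - 1) holds the low 32 bits.
 */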
static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
{
	int idx = event->hw.idx;
	u64 val;

	val = armv8pmu_read_evcntr(idx);
	if (armv8pmu_event_is_chained(event))
		val = (val << 32) | armv8pmu_read_evcntr(idx - 1);
	return val;
}
static u64 armv8pmu_read_counter(struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u64 value = 0;

	if (!armv8pmu_counter_valid(cpu_pmu, idx))
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV8_IDX_CYCLE_COUNTER)
		value = read_sysreg(pmccntr_el0);
	else
		value = armv8pmu_read_hw_counter(event);

	return value;
}
static inline void armv8pmu_write_evcntr(int idx, u32 value)
{
	armv8pmu_select_counter(idx);
	write_sysreg(value, pmxevcntr_el0);
}
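
/* Split a 64-bit value across the two halves of a chained counter pair. */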
static inline void armv8pmu_write_hw_counter(struct perf_event *event,
					     u64 value)
{
	int idx = event->hw.idx;

	if (armv8pmu_event_is_chained(event)) {
		armv8pmu_write_evcntr(idx, upper_32_bits(value));
		armv8pmu_write_evcntr(idx - 1, lower_32_bits(value));
	} else {
		armv8pmu_write_evcntr(idx, value);
	}
}
static void armv8pmu_write_counter(struct perf_event *event, u64 value)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!armv8pmu_counter_valid(cpu_pmu, idx))
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV8_IDX_CYCLE_COUNTER) {
		/*
		 * The cycle counter is really a 64-bit counter.
		 * When treating it as a 32-bit counter, we only count
		 * the lower 32 bits, and set the upper 32 bits so that
		 * we get an interrupt upon 32-bit overflow.
		 */
		if (!armv8pmu_event_is_64bit(event))
			value |= 0xffffffff00000000ULL;
		write_sysreg(value, pmccntr_el0);
	} else
		armv8pmu_write_hw_counter(event, value);
}
static inline void armv8pmu_write_evtype(int idx, u32 val)
{
	armv8pmu_select_counter(idx);
	val &= ARMV8_PMU_EVTYPE_MASK;
	write_sysreg(val, pmxevtyper_el0);
}
static inline void armv8pmu_write_event_type(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	/*
	 * For chained events, the low counter is programmed to count
	 * the event of interest and the high counter is programmed
	 * with the CHAIN event code, with filters set to count at all ELs.
	 */
	if (armv8pmu_event_is_chained(event)) {
		u32 chain_evt = ARMV8_PMUV3_PERFCTR_CHAIN |
				ARMV8_PMU_INCLUDE_EL2;

		armv8pmu_write_evtype(idx - 1, hwc->config_base);
		armv8pmu_write_evtype(idx, chain_evt);
	} else {
		armv8pmu_write_evtype(idx, hwc->config_base);
	}
}
static inline int armv8pmu_enable_counter(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	write_sysreg(BIT(counter), pmcntenset_el0);
	return idx;
}
static inline void armv8pmu_enable_event_counter(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	int idx = event->hw.idx;
	u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));

	if (armv8pmu_event_is_chained(event))
		counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));

	kvm_set_pmu_events(counter_bits, attr);

	/* We rely on the hypervisor switch code to enable guest counters */
	if (!kvm_pmu_counter_deferred(attr)) {
		armv8pmu_enable_counter(idx);
		if (armv8pmu_event_is_chained(event))
			armv8pmu_enable_counter(idx - 1);
	}
}
static inline int armv8pmu_disable_counter(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	write_sysreg(BIT(counter), pmcntenclr_el0);
	return idx;
}
static inline void armv8pmu_disable_event_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_event_attr *attr = &event->attr;
	int idx = hwc->idx;
	u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));

	if (armv8pmu_event_is_chained(event))
		counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));

	kvm_clr_pmu_events(counter_bits);

	/* We rely on the hypervisor switch code to disable guest counters */
	if (!kvm_pmu_counter_deferred(attr)) {
		if (armv8pmu_event_is_chained(event))
			armv8pmu_disable_counter(idx - 1);
		armv8pmu_disable_counter(idx);
	}
}
static inline int armv8pmu_enable_intens(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	write_sysreg(BIT(counter), pmintenset_el1);
	return idx;
}

static inline int armv8pmu_enable_event_irq(struct perf_event *event)
{
	return armv8pmu_enable_intens(event->hw.idx);
}
static inline int armv8pmu_disable_intens(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	write_sysreg(BIT(counter), pmintenclr_el1);
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	write_sysreg(BIT(counter), pmovsclr_el0);
	isb();

	return idx;
}

static inline int armv8pmu_disable_event_irq(struct perf_event *event)
{
	return armv8pmu_disable_intens(event->hw.idx);
}
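
/* Read the overflow status flags and write them back to clear them. */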
static inline u32 armv8pmu_getreset_flags(void)
{
	u32 value;

	/* Read */
	value = read_sysreg(pmovsclr_el0);

	/* Write to clear flags */
	value &= ARMV8_PMU_OVSR_MASK;
	write_sysreg(value, pmovsclr_el0);

	return value;
}
static void armv8pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv8pmu_disable_event_counter(event);

	/*
	 * Set event (if destined for PMNx counters).
	 */
	armv8pmu_write_event_type(event);

	/*
	 * Enable interrupt for this counter
	 */
	armv8pmu_enable_event_irq(event);

	/*
	 * Enable counter
	 */
	armv8pmu_enable_event_counter(event);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void armv8pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/*
	 * Disable counter and interrupt
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv8pmu_disable_event_counter(event);

	/*
	 * Disable interrupt for this counter
	 */
	armv8pmu_disable_event_irq(event);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void armv8pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Enable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Disable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
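
/*
 * All counters share one interrupt line: read-and-clear the overflow
 * flags, then walk the counters and process each one that overflowed.
 */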
static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
	u32 pmovsr;
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmovsr = armv8pmu_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv8pmu_has_overflowed(pmovsr))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	/*
	 * Stop the PMU while processing the counter overflows
	 * to prevent skews in group events.
	 */
	armv8pmu_stop(cpu_pmu);
	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}
	armv8pmu_start(cpu_pmu);

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}
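
/* Claim any free single event counter, starting after the cycle counter. */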
static int armv8pmu_get_single_idx(struct pmu_hw_events *cpuc,
				   struct arm_pmu *cpu_pmu)
{
	int idx;

	for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; idx++) {
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;
	}
	return -EAGAIN;
}
static int armv8pmu_get_chain_idx(struct pmu_hw_events *cpuc,
				  struct arm_pmu *cpu_pmu)
{
	int idx;

	/*
	 * Chaining requires two consecutive event counters, where
	 * the lower idx must be even.
	 */
	for (idx = ARMV8_IDX_COUNTER0 + 1; idx < cpu_pmu->num_events; idx += 2) {
		if (!test_and_set_bit(idx, cpuc->used_mask)) {
			/* Check if the preceding even counter is available */
			if (!test_and_set_bit(idx - 1, cpuc->used_mask))
				return idx;
			/* Release the odd counter */
			clear_bit(idx, cpuc->used_mask);
		}
	}
	return -EAGAIN;
}
static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
				  struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;

	/* Always prefer to place a cycles event on the cycle counter. */
	if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
		if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
			return ARMV8_IDX_CYCLE_COUNTER;
	}

	/*
	 * Otherwise use the event counters
	 */
	if (armv8pmu_event_is_64bit(event))
		return armv8pmu_get_chain_idx(cpuc, cpu_pmu);

	return armv8pmu_get_single_idx(cpuc, cpu_pmu);
}
static void armv8pmu_clear_event_idx(struct pmu_hw_events *cpuc,
				     struct perf_event *event)
{
	int idx = event->hw.idx;

	clear_bit(idx, cpuc->used_mask);
	if (armv8pmu_event_is_chained(event))
		clear_bit(idx - 1, cpuc->used_mask);
}
/*
 * Add an event filter to a given event.
 */
static int armv8pmu_set_event_filter(struct hw_perf_event *event,
				     struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	if (attr->exclude_idle)
		return -EPERM;

	/*
	 * If we're running in hyp mode, then we *are* the hypervisor.
	 * Therefore we ignore exclude_hv in this configuration, since
	 * there's no hypervisor to sample anyway. This is consistent
	 * with other architectures (x86 and Power).
	 */
	if (is_kernel_in_hyp_mode()) {
		if (!attr->exclude_kernel && !attr->exclude_host)
			config_base |= ARMV8_PMU_INCLUDE_EL2;
		if (attr->exclude_guest)
			config_base |= ARMV8_PMU_EXCLUDE_EL1;
		if (attr->exclude_host)
			config_base |= ARMV8_PMU_EXCLUDE_EL0;
	} else {
		if (!attr->exclude_hv && !attr->exclude_host)
			config_base |= ARMV8_PMU_INCLUDE_EL2;
	}

	/*
	 * Filter out !VHE kernels and guest kernels
	 */
	if (attr->exclude_kernel)
		config_base |= ARMV8_PMU_EXCLUDE_EL1;

	if (attr->exclude_user)
		config_base |= ARMV8_PMU_EXCLUDE_EL0;

	/*
	 * Install the filter into config_base as this is used to
	 * construct the event type.
	 */
	event->config_base = config_base;

	return 0;
}
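
/*
 * The CHAIN event is only ever programmed implicitly on the high half of
 * a chained pair, so never match it as a standalone event.
 */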
static int armv8pmu_filter_match(struct perf_event *event)
{
	unsigned long evtype = event->hw.config_base & ARMV8_PMU_EVTYPE_EVENT;

	return evtype != ARMV8_PMUV3_PERFCTR_CHAIN;
}
static void armv8pmu_reset(void *info)
{
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
	u32 idx, nb_cnt = cpu_pmu->num_events;

	/* The counter and interrupt enable registers are unknown at reset. */
	for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
		armv8pmu_disable_counter(idx);
		armv8pmu_disable_intens(idx);
	}

	/* Clear the counters we flip at guest entry/exit */
	kvm_clr_pmu_events(U32_MAX);

	/*
	 * Initialize & reset PMNC. Request an overflow interrupt for the
	 * 64-bit cycle counter but cheat in armv8pmu_write_counter().
	 */
	armv8pmu_pmcr_write(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C |
			    ARMV8_PMU_PMCR_LC);
}
static int __armv8_pmuv3_map_event(struct perf_event *event,
				   const unsigned (*extra_event_map)
						  [PERF_COUNT_HW_MAX],
				   const unsigned (*extra_cache_map)
						  [PERF_COUNT_HW_CACHE_MAX]
						  [PERF_COUNT_HW_CACHE_OP_MAX]
						  [PERF_COUNT_HW_CACHE_RESULT_MAX])
{
	int hw_event_id;
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

	hw_event_id = armpmu_map_event(event, &armv8_pmuv3_perf_map,
				       &armv8_pmuv3_perf_cache_map,
				       ARMV8_PMU_EVTYPE_EVENT);

	if (armv8pmu_event_is_64bit(event))
		event->hw.flags |= ARMPMU_EVT_64BIT;

	/* Only expose micro/arch events supported by this PMU */
	if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS)
	    && test_bit(hw_event_id, armpmu->pmceid_bitmap)) {
		return hw_event_id;
	}

	return armpmu_map_event(event, extra_event_map, extra_cache_map,
				ARMV8_PMU_EVTYPE_EVENT);
}
static int armv8_pmuv3_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, NULL);
}

static int armv8_a53_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, &armv8_a53_perf_cache_map);
}

static int armv8_a57_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, &armv8_a57_perf_cache_map);
}

static int armv8_a73_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, &armv8_a73_perf_cache_map);
}

static int armv8_thunder_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL,
				       &armv8_thunder_perf_cache_map);
}

static int armv8_vulcan_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL,
				       &armv8_vulcan_perf_cache_map);
}
struct armv8pmu_probe_info {
	struct arm_pmu *pmu;
	bool present;
};
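
/*
 * Runs on a CPU belonging to this PMU: check ID_AA64DFR0_EL1.PMUVer for a
 * usable PMUv3, then cache the counter count and the PMCEID event bitmaps.
 */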
static void __armv8pmu_probe_pmu(void *info)
{
	struct armv8pmu_probe_info *probe = info;
	struct arm_pmu *cpu_pmu = probe->pmu;
	u64 dfr0;
	u64 pmceid_raw[2];
	u32 pmceid[2];
	int pmuver;

	dfr0 = read_sysreg(id_aa64dfr0_el1);
	pmuver = cpuid_feature_extract_unsigned_field(dfr0,
			ID_AA64DFR0_PMUVER_SHIFT);
	if (pmuver == 0xf || pmuver == 0)
		return;

	probe->present = true;

	/* Read the number of supported CNTx counters from PMNC */
	cpu_pmu->num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT)
		& ARMV8_PMU_PMCR_N_MASK;

	/* Add the CPU cycles counter */
	cpu_pmu->num_events += 1;

	pmceid[0] = pmceid_raw[0] = read_sysreg(pmceid0_el0);
	pmceid[1] = pmceid_raw[1] = read_sysreg(pmceid1_el0);

	bitmap_from_arr32(cpu_pmu->pmceid_bitmap,
			  pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);

	pmceid[0] = pmceid_raw[0] >> 32;
	pmceid[1] = pmceid_raw[1] >> 32;

	bitmap_from_arr32(cpu_pmu->pmceid_ext_bitmap,
			  pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
}
static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
{
	struct armv8pmu_probe_info probe = {
		.pmu = cpu_pmu,
		.present = false,
	};
	int ret;

	ret = smp_call_function_any(&cpu_pmu->supported_cpus,
				    __armv8pmu_probe_pmu,
				    &probe, 1);
	if (ret)
		return ret;

	return probe.present ? 0 : -ENODEV;
}
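
/* Wire up the low-level callbacks shared by every ARMv8 PMU variant. */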
static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8pmu_probe_pmu(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->handle_irq = armv8pmu_handle_irq;
	cpu_pmu->enable = armv8pmu_enable_event;
	cpu_pmu->disable = armv8pmu_disable_event;
	cpu_pmu->read_counter = armv8pmu_read_counter;
	cpu_pmu->write_counter = armv8pmu_write_counter;
	cpu_pmu->get_event_idx = armv8pmu_get_event_idx;
	cpu_pmu->clear_event_idx = armv8pmu_clear_event_idx;
	cpu_pmu->start = armv8pmu_start;
	cpu_pmu->stop = armv8pmu_stop;
	cpu_pmu->reset = armv8pmu_reset;
	cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
	cpu_pmu->filter_match = armv8pmu_filter_match;

	return 0;
}
static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name = "armv8_pmuv3";
	cpu_pmu->map_event = armv8_pmuv3_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}

static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name = "armv8_cortex_a35";
	cpu_pmu->map_event = armv8_a53_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}

static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name = "armv8_cortex_a53";
	cpu_pmu->map_event = armv8_a53_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}

static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name = "armv8_cortex_a57";
	cpu_pmu->map_event = armv8_a57_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}

static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name = "armv8_cortex_a72";
	cpu_pmu->map_event = armv8_a57_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}

static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name = "armv8_cortex_a73";
	cpu_pmu->map_event = armv8_a73_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}

static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name = "armv8_cavium_thunder";
	cpu_pmu->map_event = armv8_thunder_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}

static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name = "armv8_brcm_vulcan";
	cpu_pmu->map_event = armv8_vulcan_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}
static const struct of_device_id armv8_pmu_of_device_ids[] = {
	{.compatible = "arm,armv8-pmuv3", .data = armv8_pmuv3_init},
	{.compatible = "arm,cortex-a35-pmu", .data = armv8_a35_pmu_init},
	{.compatible = "arm,cortex-a53-pmu", .data = armv8_a53_pmu_init},
	{.compatible = "arm,cortex-a57-pmu", .data = armv8_a57_pmu_init},
	{.compatible = "arm,cortex-a72-pmu", .data = armv8_a72_pmu_init},
	{.compatible = "arm,cortex-a73-pmu", .data = armv8_a73_pmu_init},
	{.compatible = "cavium,thunder-pmu", .data = armv8_thunder_pmu_init},
	{.compatible = "brcm,vulcan-pmu", .data = armv8_vulcan_pmu_init},
	{},
};
static int armv8_pmu_device_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
}

static struct platform_driver armv8_pmu_driver = {
	.driver = {
		.name = ARMV8_PMU_PDEV_NAME,
		.of_match_table = armv8_pmu_of_device_ids,
		.suppress_bind_attrs = true,
	},
	.probe = armv8_pmu_device_probe,
};
static int __init armv8_pmu_driver_init(void)
{
	if (acpi_disabled)
		return platform_driver_register(&armv8_pmu_driver);
	else
		return arm_pmu_acpi_probe(armv8_pmuv3_init);
}
device_initcall(armv8_pmu_driver_init)
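
/*
 * Fill in the self-monitoring time fields of the perf mmap page so that
 * userspace can convert raw counter timestamps to nanoseconds.
 */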
void arch_perf_update_userpage(struct perf_event *event,
			       struct perf_event_mmap_page *userpg, u64 now)
{
	u32 freq;
	u32 shift;

	/*
	 * Internal timekeeping for enabled/running/stopped times
	 * is always computed with the sched_clock.
	 */
	freq = arch_timer_get_rate();
	userpg->cap_user_time = 1;

	clocks_calc_mult_shift(&userpg->time_mult, &shift, freq,
			NSEC_PER_SEC, 0);
	/*
	 * time_shift is not expected to be greater than 31 due to
	 * the original published conversion algorithm shifting a
	 * 32-bit value (now specifies a 64-bit value) - see the
	 * perf_event_mmap_page documentation in perf_event.h.
	 */
	if (shift == 32) {
		shift = 31;
		userpg->time_mult >>= 1;
	}
	userpg->time_shift = (u16)shift;
	userpg->time_offset = -now;
}