// SPDX-License-Identifier: GPL-2.0

/*
 * This driver adds support for perf events to use the Performance
 * Monitor Counter Groups (PMCG) associated with an SMMUv3 node
 * to monitor that node.
 *
 * SMMUv3 PMCG devices are named as smmuv3_pmcg_<phys_addr_page> where
 * <phys_addr_page> is the physical page address of the SMMU PMCG wrapped
 * to 4K boundary. For example, the PMCG at 0xff88840000 is named
 * smmuv3_pmcg_ff88840
 *
 * Filtering by stream id is done by specifying filtering parameters
 * with the event. Options are:
 *   filter_enable    - 0 = no filtering, 1 = filtering enabled
 *   filter_span      - 0 = exact match, 1 = pattern match
 *   filter_stream_id - pattern to filter against
 *
 * To match a partial StreamID where the X most-significant bits must match
 * but the Y least-significant bits might differ, STREAMID is programmed
 * with a value that contains:
 *  STREAMID[Y - 1] == 0.
 *  STREAMID[Y - 2:0] == 1 (where Y > 1).
 * The remainder of implemented bits of STREAMID (X bits, from bit Y upwards)
 * contain a value to match from the corresponding bits of event StreamID.
 *
 * Example: perf stat -e smmuv3_pmcg_ff88840/transaction,filter_enable=1,
 *                    filter_span=1,filter_stream_id=0x42/ -a netperf
 * Applies filter pattern 0x42 to transaction events, which means events
 * matching stream ids 0x42 and 0x43 are counted. Further filtering
 * information is available in the SMMU documentation.
 *
 * SMMU events are not attributable to a CPU, so task mode and sampling
 * are not supported.
 */

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/msi.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/smp.h>
#include <linux/sysfs.h>
#include <linux/types.h>

#define SMMU_PMCG_EVCNTR0 0x0
#define SMMU_PMCG_EVCNTR(n, stride) (SMMU_PMCG_EVCNTR0 + (n) * (stride))
#define SMMU_PMCG_EVTYPER0 0x400
#define SMMU_PMCG_EVTYPER(n) (SMMU_PMCG_EVTYPER0 + (n) * 4)
#define SMMU_PMCG_SID_SPAN_SHIFT 29
#define SMMU_PMCG_SMR0 0xA00
#define SMMU_PMCG_SMR(n) (SMMU_PMCG_SMR0 + (n) * 4)
#define SMMU_PMCG_CNTENSET0 0xC00
#define SMMU_PMCG_CNTENCLR0 0xC20
#define SMMU_PMCG_INTENSET0 0xC40
#define SMMU_PMCG_INTENCLR0 0xC60
#define SMMU_PMCG_OVSCLR0 0xC80
#define SMMU_PMCG_OVSSET0 0xCC0
#define SMMU_PMCG_CFGR 0xE00
#define SMMU_PMCG_CFGR_SID_FILTER_TYPE BIT(23)
#define SMMU_PMCG_CFGR_MSI BIT(21)
#define SMMU_PMCG_CFGR_RELOC_CTRS BIT(20)
#define SMMU_PMCG_CFGR_SIZE GENMASK(13, 8)
#define SMMU_PMCG_CFGR_NCTR GENMASK(5, 0)
#define SMMU_PMCG_CR 0xE04
#define SMMU_PMCG_CR_ENABLE BIT(0)
#define SMMU_PMCG_IIDR 0xE08
#define SMMU_PMCG_CEID0 0xE20
#define SMMU_PMCG_CEID1 0xE28
#define SMMU_PMCG_IRQ_CTRL 0xE50
#define SMMU_PMCG_IRQ_CTRL_IRQEN BIT(0)
#define SMMU_PMCG_IRQ_CFG0 0xE58
#define SMMU_PMCG_IRQ_CFG1 0xE60
#define SMMU_PMCG_IRQ_CFG2 0xE64

/* MSI config fields */
#define MSI_CFG0_ADDR_MASK GENMASK_ULL(51, 2)
#define MSI_CFG2_MEMATTR_DEVICE_nGnRE 0x1

#define SMMU_PMCG_DEFAULT_FILTER_SPAN 1
#define SMMU_PMCG_DEFAULT_FILTER_SID GENMASK(31, 0)

#define SMMU_PMCG_MAX_COUNTERS 64
#define SMMU_PMCG_ARCH_MAX_EVENTS 128

#define SMMU_PMCG_PA_SHIFT 12
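
/* Driver option flags, set from the ACPI IORT model in smmu_pmu_get_acpi_options() */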
#define SMMU_PMCG_EVCNTR_RDONLY BIT(0)

static int cpuhp_state_num;

struct smmu_pmu {
	struct hlist_node node;
	struct perf_event *events[SMMU_PMCG_MAX_COUNTERS];
	DECLARE_BITMAP(used_counters, SMMU_PMCG_MAX_COUNTERS);
	DECLARE_BITMAP(supported_events, SMMU_PMCG_ARCH_MAX_EVENTS);
	unsigned int irq;
	unsigned int on_cpu;
	struct pmu pmu;
	unsigned int num_counters;
	struct device *dev;
	void __iomem *reg_base;
	void __iomem *reloc_base;
	u64 counter_mask;
	u32 options;
	u32 iidr;
	bool global_filter;
};

#define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu))

#define SMMU_PMU_EVENT_ATTR_EXTRACTOR(_name, _config, _start, _end)	\
	static inline u32 get_##_name(struct perf_event *event)	\
	{								\
		return FIELD_GET(GENMASK_ULL(_end, _start),		\
				 event->attr._config);			\
	}								\

SMMU_PMU_EVENT_ATTR_EXTRACTOR(event, config, 0, 15);
SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_stream_id, config1, 0, 31);
SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_span, config1, 32, 32);
SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_enable, config1, 33, 33);
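
/*
 * Worked example of the encoding implemented by the extractors above: the
 * perf option string "filter_enable=1,filter_span=1,filter_stream_id=0x42"
 * is packed as attr.config1 = (1ULL << 33) | (1ULL << 32) | 0x42
 *                           = 0x300000042.
 */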

static inline void smmu_pmu_enable(struct pmu *pmu)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);

	writel(SMMU_PMCG_IRQ_CTRL_IRQEN,
	       smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
	writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR);
}

static inline void smmu_pmu_disable(struct pmu *pmu)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);

	writel(0, smmu_pmu->reg_base + SMMU_PMCG_CR);
	writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
}
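
/*
 * Event counters are accessed as 64-bit (stride 8) or 32-bit (stride 4)
 * registers depending on whether the implemented counter width, derived
 * from SMMU_PMCG_CFGR.SIZE, is larger than 32 bits.
 */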
static inline void smmu_pmu_counter_set_value(struct smmu_pmu *smmu_pmu,
					      u32 idx, u64 value)
{
	if (smmu_pmu->counter_mask & BIT(32))
		writeq(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
	else
		writel(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));
}

static inline u64 smmu_pmu_counter_get_value(struct smmu_pmu *smmu_pmu, u32 idx)
{
	u64 value;

	if (smmu_pmu->counter_mask & BIT(32))
		value = readq(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
	else
		value = readl(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));

	return value;
}

static inline void smmu_pmu_counter_enable(struct smmu_pmu *smmu_pmu, u32 idx)
{
	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENSET0);
}

static inline void smmu_pmu_counter_disable(struct smmu_pmu *smmu_pmu, u32 idx)
{
	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
}

static inline void smmu_pmu_interrupt_enable(struct smmu_pmu *smmu_pmu, u32 idx)
{
	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENSET0);
}

static inline void smmu_pmu_interrupt_disable(struct smmu_pmu *smmu_pmu,
					      u32 idx)
{
	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
}

static inline void smmu_pmu_set_evtyper(struct smmu_pmu *smmu_pmu, u32 idx,
					u32 val)
{
	writel(val, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));
}

static inline void smmu_pmu_set_smr(struct smmu_pmu *smmu_pmu, u32 idx, u32 val)
{
	writel(val, smmu_pmu->reg_base + SMMU_PMCG_SMR(idx));
}
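
/*
 * Fold the delta since the last read of the counter into event->count.
 * The prev_count/cmpxchg loop makes this safe against a racing update
 * from the overflow interrupt, and masking the delta to the counter
 * width accounts for a single wrap of a narrower-than-64-bit counter.
 */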
static void smmu_pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	u64 delta, prev, now;
	u32 idx = hwc->idx;

	do {
		prev = local64_read(&hwc->prev_count);
		now = smmu_pmu_counter_get_value(smmu_pmu, idx);
	} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);

	/* handle overflow. */
	delta = now - prev;
	delta &= smmu_pmu->counter_mask;

	local64_add(delta, &event->count);
}

static void smmu_pmu_set_period(struct smmu_pmu *smmu_pmu,
				struct hw_perf_event *hwc)
{
	u32 idx = hwc->idx;
	u64 new;

	if (smmu_pmu->options & SMMU_PMCG_EVCNTR_RDONLY) {
		/*
		 * On platforms that require this quirk, if the counter starts
		 * at < half_counter value and wraps, the current logic of
		 * handling the overflow may not work. It is expected that,
		 * those platforms will have full 64 counter bits implemented
		 * so that such a possibility is remote (e.g. HiSilicon HIP08).
		 */
		new = smmu_pmu_counter_get_value(smmu_pmu, idx);
	} else {
		/*
		 * We limit the max period to half the max counter value
		 * of the counter size, so that even in the case of extreme
		 * interrupt latency the counter will (hopefully) not wrap
		 * past its initial value.
		 */
		new = smmu_pmu->counter_mask >> 1;
		smmu_pmu_counter_set_value(smmu_pmu, idx, new);
	}

	local64_set(&hwc->prev_count, new);
}

static void smmu_pmu_set_event_filter(struct perf_event *event,
				      int idx, u32 span, u32 sid)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	u32 evtyper;

	evtyper = get_event(event) | span << SMMU_PMCG_SID_SPAN_SHIFT;
	smmu_pmu_set_evtyper(smmu_pmu, idx, evtyper);
	smmu_pmu_set_smr(smmu_pmu, idx, sid);
}

static bool smmu_pmu_check_global_filter(struct perf_event *curr,
					 struct perf_event *new)
{
	if (get_filter_enable(new) != get_filter_enable(curr))
		return false;

	if (!get_filter_enable(new))
		return true;

	return get_filter_span(new) == get_filter_span(curr) &&
	       get_filter_stream_id(new) == get_filter_stream_id(curr);
}
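
/*
 * With a global filter (SMMU_PMCG_CFGR.SID_FILTER_TYPE set), the stream ID
 * filter programmed through counter 0 applies to all counters, so a new
 * event is only schedulable if its filter settings match whatever is
 * already counting (or if the PMU is currently empty).
 */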
static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
				       struct perf_event *event, int idx)
{
	u32 span, sid;
	unsigned int cur_idx, num_ctrs = smmu_pmu->num_counters;
	bool filter_en = !!get_filter_enable(event);

	span = filter_en ? get_filter_span(event) :
			   SMMU_PMCG_DEFAULT_FILTER_SPAN;
	sid = filter_en ? get_filter_stream_id(event) :
			  SMMU_PMCG_DEFAULT_FILTER_SID;

	cur_idx = find_first_bit(smmu_pmu->used_counters, num_ctrs);
	/*
	 * Per-counter filtering, or scheduling the first globally-filtered
	 * event into an empty PMU so idx == 0 and it works out equivalent.
	 */
	if (!smmu_pmu->global_filter || cur_idx == num_ctrs) {
		smmu_pmu_set_event_filter(event, idx, span, sid);
		return 0;
	}

	/* Otherwise, must match whatever's currently scheduled */
	if (smmu_pmu_check_global_filter(smmu_pmu->events[cur_idx], event)) {
		smmu_pmu_set_evtyper(smmu_pmu, idx, get_event(event));
		return 0;
	}

	return -EAGAIN;
}

static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,
				  struct perf_event *event)
{
	int idx, err;
	unsigned int num_ctrs = smmu_pmu->num_counters;

	idx = find_first_zero_bit(smmu_pmu->used_counters, num_ctrs);
	if (idx == num_ctrs)
		/* The counters are all in use. */
		return -EAGAIN;

	err = smmu_pmu_apply_event_filter(smmu_pmu, event, idx);
	if (err)
		return err;

	set_bit(idx, smmu_pmu->used_counters);

	return idx;
}

static bool smmu_pmu_events_compatible(struct perf_event *curr,
				       struct perf_event *new)
{
	if (new->pmu != curr->pmu)
		return false;

	if (to_smmu_pmu(new->pmu)->global_filter &&
	    !smmu_pmu_check_global_filter(curr, new))
		return false;

	return true;
}

/*
 * Implementation of abstract pmu functionality required by
 * the core perf events code.
 */

static int smmu_pmu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	struct device *dev = smmu_pmu->dev;
	struct perf_event *sibling;
	int group_num_events = 1;
	u16 event_id;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (hwc->sample_period) {
		dev_dbg(dev, "Sampling not supported\n");
		return -EOPNOTSUPP;
	}

	if (event->cpu < 0) {
		dev_dbg(dev, "Per-task mode not supported\n");
		return -EOPNOTSUPP;
	}

	/* Verify specified event is supported on this PMU */
	event_id = get_event(event);
	if (event_id < SMMU_PMCG_ARCH_MAX_EVENTS &&
	    (!test_bit(event_id, smmu_pmu->supported_events))) {
		dev_dbg(dev, "Invalid event %d for this PMU\n", event_id);
		return -EINVAL;
	}

	/* Don't allow groups with mixed PMUs, except for s/w events */
	if (!is_software_event(event->group_leader)) {
		if (!smmu_pmu_events_compatible(event->group_leader, event))
			return -EINVAL;

		if (++group_num_events > smmu_pmu->num_counters)
			return -EINVAL;
	}

	for_each_sibling_event(sibling, event->group_leader) {
		if (is_software_event(sibling))
			continue;

		if (!smmu_pmu_events_compatible(sibling, event))
			return -EINVAL;

		if (++group_num_events > smmu_pmu->num_counters)
			return -EINVAL;
	}

	hwc->idx = -1;

	/*
	 * Ensure all events are on the same cpu so all events are in the
	 * same cpu context, to avoid races on pmu_enable etc.
	 */
	event->cpu = smmu_pmu->on_cpu;

	return 0;
}

static void smmu_pmu_event_start(struct perf_event *event, int flags)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	hwc->state = 0;

	smmu_pmu_set_period(smmu_pmu, hwc);

	smmu_pmu_counter_enable(smmu_pmu, idx);
}

static void smmu_pmu_event_stop(struct perf_event *event, int flags)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (hwc->state & PERF_HES_STOPPED)
		return;

	smmu_pmu_counter_disable(smmu_pmu, idx);
	/* As the counter gets updated on _start, ignore PERF_EF_UPDATE */
	smmu_pmu_event_update(event);
	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

static int smmu_pmu_event_add(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);

	idx = smmu_pmu_get_event_idx(smmu_pmu, event);
	if (idx < 0)
		return idx;

	hwc->idx = idx;
	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	smmu_pmu->events[idx] = event;
	local64_set(&hwc->prev_count, 0);

	smmu_pmu_interrupt_enable(smmu_pmu, idx);

	if (flags & PERF_EF_START)
		smmu_pmu_event_start(event, flags);

	/* Propagate changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}

static void smmu_pmu_event_del(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	int idx = hwc->idx;

	smmu_pmu_event_stop(event, flags | PERF_EF_UPDATE);
	smmu_pmu_interrupt_disable(smmu_pmu, idx);
	smmu_pmu->events[idx] = NULL;
	clear_bit(idx, smmu_pmu->used_counters);

	perf_event_update_userpage(event);
}

static void smmu_pmu_event_read(struct perf_event *event)
{
	smmu_pmu_event_update(event);
}

/* cpumask */

static ssize_t smmu_pmu_cpumask_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(smmu_pmu->on_cpu));
}

static struct device_attribute smmu_pmu_cpumask_attr =
		__ATTR(cpumask, 0444, smmu_pmu_cpumask_show, NULL);

static struct attribute *smmu_pmu_cpumask_attrs[] = {
	&smmu_pmu_cpumask_attr.attr,
	NULL
};

static const struct attribute_group smmu_pmu_cpumask_group = {
	.attrs = smmu_pmu_cpumask_attrs,
};

/* Events */

static ssize_t smmu_pmu_event_show(struct device *dev,
				   struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);

	return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}

#define SMMU_EVENT_ATTR(name, config)			\
	PMU_EVENT_ATTR_ID(name, smmu_pmu_event_show, config)
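
/* Architected PMCG events (IDs 0-7); hardware support for each is advertised via SMMU_PMCG_CEID0/1 */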
static struct attribute *smmu_pmu_events[] = {
	SMMU_EVENT_ATTR(cycles, 0),
	SMMU_EVENT_ATTR(transaction, 1),
	SMMU_EVENT_ATTR(tlb_miss, 2),
	SMMU_EVENT_ATTR(config_cache_miss, 3),
	SMMU_EVENT_ATTR(trans_table_walk_access, 4),
	SMMU_EVENT_ATTR(config_struct_access, 5),
	SMMU_EVENT_ATTR(pcie_ats_trans_rq, 6),
	SMMU_EVENT_ATTR(pcie_ats_trans_passed, 7),
	NULL
};

static umode_t smmu_pmu_event_is_visible(struct kobject *kobj,
					 struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);

	if (test_bit(pmu_attr->id, smmu_pmu->supported_events))
		return attr->mode;

	return 0;
}

static const struct attribute_group smmu_pmu_events_group = {
	.name = "events",
	.attrs = smmu_pmu_events,
	.is_visible = smmu_pmu_event_is_visible,
};

static ssize_t smmu_pmu_identifier_attr_show(struct device *dev,
					struct device_attribute *attr,
					char *page)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));

	return sysfs_emit(page, "0x%08x\n", smmu_pmu->iidr);
}

static umode_t smmu_pmu_identifier_attr_visible(struct kobject *kobj,
						struct attribute *attr,
						int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));

	if (!smmu_pmu->iidr)
		return 0;
	return attr->mode;
}

static struct device_attribute smmu_pmu_identifier_attr =
	__ATTR(identifier, 0444, smmu_pmu_identifier_attr_show, NULL);

static struct attribute *smmu_pmu_identifier_attrs[] = {
	&smmu_pmu_identifier_attr.attr,
	NULL
};

static const struct attribute_group smmu_pmu_identifier_group = {
	.attrs = smmu_pmu_identifier_attrs,
	.is_visible = smmu_pmu_identifier_attr_visible,
};

/* Formats */
PMU_FORMAT_ATTR(event, "config:0-15");
PMU_FORMAT_ATTR(filter_stream_id, "config1:0-31");
PMU_FORMAT_ATTR(filter_span, "config1:32");
PMU_FORMAT_ATTR(filter_enable, "config1:33");

static struct attribute *smmu_pmu_formats[] = {
	&format_attr_event.attr,
	&format_attr_filter_stream_id.attr,
	&format_attr_filter_span.attr,
	&format_attr_filter_enable.attr,
	NULL
};

static const struct attribute_group smmu_pmu_format_group = {
	.name = "format",
	.attrs = smmu_pmu_formats,
};

static const struct attribute_group *smmu_pmu_attr_grps[] = {
	&smmu_pmu_cpumask_group,
	&smmu_pmu_events_group,
	&smmu_pmu_format_group,
	&smmu_pmu_identifier_group,
	NULL
};

/*
 * Generic device handlers
 */

static int smmu_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct smmu_pmu *smmu_pmu;
	unsigned int target;

	smmu_pmu = hlist_entry_safe(node, struct smmu_pmu, node);
	if (cpu != smmu_pmu->on_cpu)
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&smmu_pmu->pmu, cpu, target);
	smmu_pmu->on_cpu = target;
	WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(target)));

	return 0;
}
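
/*
 * Overflow interrupt handler: read the overflow status from
 * SMMU_PMCG_OVSSET0, acknowledge it through SMMU_PMCG_OVSCLR0, then
 * update and re-prime each counter that overflowed.
 */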
static irqreturn_t smmu_pmu_handle_irq(int irq_num, void *data)
{
	struct smmu_pmu *smmu_pmu = data;
	u64 ovsr;
	unsigned int idx;

	ovsr = readq(smmu_pmu->reloc_base + SMMU_PMCG_OVSSET0);
	if (!ovsr)
		return IRQ_NONE;

	writeq(ovsr, smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);

	for_each_set_bit(idx, (unsigned long *)&ovsr, smmu_pmu->num_counters) {
		struct perf_event *event = smmu_pmu->events[idx];
		struct hw_perf_event *hwc;

		if (WARN_ON_ONCE(!event))
			continue;

		smmu_pmu_event_update(event);
		hwc = &event->hw;

		smmu_pmu_set_period(smmu_pmu, hwc);
	}

	return IRQ_HANDLED;
}

static void smmu_pmu_free_msis(void *data)
{
	struct device *dev = data;

	platform_msi_domain_free_irqs(dev);
}

static void smmu_pmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	phys_addr_t doorbell;
	struct device *dev = msi_desc_to_dev(desc);
	struct smmu_pmu *pmu = dev_get_drvdata(dev);

	doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
	doorbell &= MSI_CFG0_ADDR_MASK;

	writeq_relaxed(doorbell, pmu->reg_base + SMMU_PMCG_IRQ_CFG0);
	writel_relaxed(msg->data, pmu->reg_base + SMMU_PMCG_IRQ_CFG1);
	writel_relaxed(MSI_CFG2_MEMATTR_DEVICE_nGnRE,
		       pmu->reg_base + SMMU_PMCG_IRQ_CFG2);
}
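
/*
 * If the PMCG advertises MSI support (SMMU_PMCG_CFGR.MSI), allocate a
 * platform MSI and program the doorbell through SMMU_PMCG_IRQ_CFG0/1/2;
 * otherwise the wired IRQ obtained from the platform device is used.
 */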
static void smmu_pmu_setup_msi(struct smmu_pmu *pmu)
{
	struct msi_desc *desc;
	struct device *dev = pmu->dev;
	int ret;

	/* Clear MSI address reg */
	writeq_relaxed(0, pmu->reg_base + SMMU_PMCG_IRQ_CFG0);

	/* MSI supported or not */
	if (!(readl(pmu->reg_base + SMMU_PMCG_CFGR) & SMMU_PMCG_CFGR_MSI))
		return;

	ret = platform_msi_domain_alloc_irqs(dev, 1, smmu_pmu_write_msi_msg);
	if (ret) {
		dev_warn(dev, "failed to allocate MSIs\n");
		return;
	}

	desc = first_msi_entry(dev);
	if (desc)
		pmu->irq = desc->irq;

	/* Add callback to free MSIs on teardown */
	devm_add_action(dev, smmu_pmu_free_msis, dev);
}

static int smmu_pmu_setup_irq(struct smmu_pmu *pmu)
{
	unsigned long flags = IRQF_NOBALANCING | IRQF_SHARED | IRQF_NO_THREAD;
	int irq, ret = -ENXIO;

	smmu_pmu_setup_msi(pmu);

	irq = pmu->irq;
	if (irq)
		ret = devm_request_irq(pmu->dev, irq, smmu_pmu_handle_irq,
				       flags, "smmuv3-pmu", pmu);

	return ret;
}

static void smmu_pmu_reset(struct smmu_pmu *smmu_pmu)
{
	u64 counter_present_mask = GENMASK_ULL(smmu_pmu->num_counters - 1, 0);

	smmu_pmu_disable(&smmu_pmu->pmu);

	/* Disable counter and interrupt */
	writeq_relaxed(counter_present_mask,
		       smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
	writeq_relaxed(counter_present_mask,
		       smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
	writeq_relaxed(counter_present_mask,
		       smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
}

static void smmu_pmu_get_acpi_options(struct smmu_pmu *smmu_pmu)
{
	u32 model;

	model = *(u32 *)dev_get_platdata(smmu_pmu->dev);

	switch (model) {
	case IORT_SMMU_V3_PMCG_HISI_HIP08:
		/* HiSilicon Erratum 162001800 */
		smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY;
		break;
	}

	dev_notice(smmu_pmu->dev, "option mask 0x%x\n", smmu_pmu->options);
}

static int smmu_pmu_probe(struct platform_device *pdev)
{
	struct smmu_pmu *smmu_pmu;
	struct resource *res_0;
	u32 cfgr, reg_size;
	u64 ceid_64[2];
	int irq, err;
	char *name;
	struct device *dev = &pdev->dev;

	smmu_pmu = devm_kzalloc(dev, sizeof(*smmu_pmu), GFP_KERNEL);
	if (!smmu_pmu)
		return -ENOMEM;

	smmu_pmu->dev = dev;
	platform_set_drvdata(pdev, smmu_pmu);

	smmu_pmu->pmu = (struct pmu) {
		.module = THIS_MODULE,
		.task_ctx_nr = perf_invalid_context,
		.pmu_enable = smmu_pmu_enable,
		.pmu_disable = smmu_pmu_disable,
		.event_init = smmu_pmu_event_init,
		.add = smmu_pmu_event_add,
		.del = smmu_pmu_event_del,
		.start = smmu_pmu_event_start,
		.stop = smmu_pmu_event_stop,
		.read = smmu_pmu_event_read,
		.attr_groups = smmu_pmu_attr_grps,
		.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
	};

	smmu_pmu->reg_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res_0);
	if (IS_ERR(smmu_pmu->reg_base))
		return PTR_ERR(smmu_pmu->reg_base);

	cfgr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CFGR);

	/* Determine if page 1 is present */
	if (cfgr & SMMU_PMCG_CFGR_RELOC_CTRS) {
		smmu_pmu->reloc_base = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(smmu_pmu->reloc_base))
			return PTR_ERR(smmu_pmu->reloc_base);
	} else {
		smmu_pmu->reloc_base = smmu_pmu->reg_base;
	}

	irq = platform_get_irq_optional(pdev, 0);
	if (irq > 0)
		smmu_pmu->irq = irq;

	ceid_64[0] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID0);
	ceid_64[1] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID1);
	bitmap_from_arr32(smmu_pmu->supported_events, (u32 *)ceid_64,
			  SMMU_PMCG_ARCH_MAX_EVENTS);

	smmu_pmu->num_counters = FIELD_GET(SMMU_PMCG_CFGR_NCTR, cfgr) + 1;

	smmu_pmu->global_filter = !!(cfgr & SMMU_PMCG_CFGR_SID_FILTER_TYPE);
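
	/* SMMU_PMCG_CFGR.SIZE advertises the counter width minus one, hence the inclusive mask */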
	reg_size = FIELD_GET(SMMU_PMCG_CFGR_SIZE, cfgr);
	smmu_pmu->counter_mask = GENMASK_ULL(reg_size, 0);

	smmu_pmu_reset(smmu_pmu);

	err = smmu_pmu_setup_irq(smmu_pmu);
	if (err) {
		dev_err(dev, "Setup irq failed, PMU @%pa\n", &res_0->start);
		return err;
	}

	smmu_pmu->iidr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_IIDR);

	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "smmuv3_pmcg_%llx",
			      (res_0->start) >> SMMU_PMCG_PA_SHIFT);
	if (!name) {
		dev_err(dev, "Create name failed, PMU @%pa\n", &res_0->start);
		return -EINVAL;
	}

	smmu_pmu_get_acpi_options(smmu_pmu);

	/* Pick one CPU to be the preferred one to use */
	smmu_pmu->on_cpu = raw_smp_processor_id();
	WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(smmu_pmu->on_cpu)));

	err = cpuhp_state_add_instance_nocalls(cpuhp_state_num,
					       &smmu_pmu->node);
	if (err) {
		dev_err(dev, "Error %d registering hotplug, PMU @%pa\n",
			err, &res_0->start);
		return err;
	}

	err = perf_pmu_register(&smmu_pmu->pmu, name, -1);
	if (err) {
		dev_err(dev, "Error %d registering PMU @%pa\n",
			err, &res_0->start);
		goto out_unregister;
	}

	dev_info(dev, "Registered PMU @ %pa using %d counters with %s filter settings\n",
		 &res_0->start, smmu_pmu->num_counters,
		 smmu_pmu->global_filter ? "Global(Counter0)" :
		 "Individual");

	return 0;

out_unregister:
	cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
	return err;
}

static int smmu_pmu_remove(struct platform_device *pdev)
{
	struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);

	perf_pmu_unregister(&smmu_pmu->pmu);
	cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);

	return 0;
}

static void smmu_pmu_shutdown(struct platform_device *pdev)
{
	struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);

	smmu_pmu_disable(&smmu_pmu->pmu);
}

static struct platform_driver smmu_pmu_driver = {
	.driver = {
		.name = "arm-smmu-v3-pmcg",
		.suppress_bind_attrs = true,
	},
	.probe = smmu_pmu_probe,
	.remove = smmu_pmu_remove,
	.shutdown = smmu_pmu_shutdown,
};

static int __init arm_smmu_pmu_init(void)
{
	cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
						  "perf/arm/pmcg:online",
						  NULL,
						  smmu_pmu_offline_cpu);
	if (cpuhp_state_num < 0)
		return cpuhp_state_num;

	return platform_driver_register(&smmu_pmu_driver);
}
module_init(arm_smmu_pmu_init);

static void __exit arm_smmu_pmu_exit(void)
{
	platform_driver_unregister(&smmu_pmu_driver);
	cpuhp_remove_multi_state(cpuhp_state_num);
}

module_exit(arm_smmu_pmu_exit);

MODULE_DESCRIPTION("PMU driver for ARM SMMUv3 Performance Monitors Extension");
MODULE_AUTHOR("Neil Leeder <nleeder@codeaurora.org>");
MODULE_AUTHOR("Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>");
MODULE_LICENSE("GPL v2");