// SPDX-License-Identifier: GPL-2.0
/*
 * ARM CoreSight Architecture PMU driver.
 *
 * This driver adds support for uncore PMUs based on the ARM CoreSight
 * Performance Monitoring Unit Architecture. The PMU is accessible via MMIO
 * registers and, like other uncore PMUs, it does not support process-specific
 * events and cannot be used in sampling mode.
 *
 * This code is based on other uncore PMUs like the ARM DSU PMU. It provides a
 * generic implementation to operate the PMU according to the CoreSight PMU
 * architecture and the ACPI ARM PMU table (APMT) documents below:
 *   - ARM CoreSight PMU architecture document number: ARM IHI 0091 A.a-00bet0.
 *   - APMT document number: ARM DEN0117.
 *
 * The user should refer to the vendor technical documentation for details
 * about the supported events.
 *
 * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */
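
/*
 * Example usage (a sketch, not from the architecture spec): the PMU instance
 * name below follows the naming scheme in arm_cspmu_get_name(), and the
 * event/filter values are hypothetical since event encodings are
 * vendor-specific:
 *
 *   perf stat -a -e arm_cspmu_mc_0/event=0x2a,filter=0x0/ sleep 1
 *
 * "event" and "filter" are the default format attributes exported by this
 * driver, mapping to perf_event_attr.config and config1 respectively.
 */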

#include <linux/acpi.h>
#include <linux/cacheinfo.h>
#include <linux/ctype.h>
#include <linux/interrupt.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>

#include "arm_cspmu.h"
#include "nvidia_cspmu.h"

#define PMUNAME "arm_cspmu"
#define DRVNAME "arm-cs-arch-pmu"

#define ARM_CSPMU_CPUMASK_ATTR(_name, _config)			\
	ARM_CSPMU_EXT_ATTR(_name, arm_cspmu_cpumask_show,	\
			   (unsigned long)_config)

/*
 * CoreSight PMU Arch register offsets.
 */
#define PMEVCNTR_LO	0x0
#define PMEVCNTR_HI	0x4
#define PMEVTYPER	0x400
#define PMCCFILTR	0x47C
#define PMEVFILTR	0xA00
#define PMCNTENSET	0xC00
#define PMCNTENCLR	0xC20
#define PMINTENSET	0xC40
#define PMINTENCLR	0xC60
#define PMOVSCLR	0xC80
#define PMOVSSET	0xCC0
#define PMCFGR		0xE00
#define PMCR		0xE04
#define PMIIDR		0xE08

/* PMCFGR register field */
#define PMCFGR_NCG	GENMASK(31, 28)
#define PMCFGR_HDBG	BIT(24)
#define PMCFGR_TRO	BIT(23)
#define PMCFGR_SS	BIT(22)
#define PMCFGR_FZO	BIT(21)
#define PMCFGR_MSI	BIT(20)
#define PMCFGR_UEN	BIT(19)
#define PMCFGR_NA	BIT(17)
#define PMCFGR_EX	BIT(16)
#define PMCFGR_CCD	BIT(15)
#define PMCFGR_CC	BIT(14)
#define PMCFGR_SIZE	GENMASK(13, 8)
#define PMCFGR_N	GENMASK(7, 0)

/* PMCR register field */
#define PMCR_TRO	BIT(11)
#define PMCR_HDBG	BIT(10)
#define PMCR_FZO	BIT(9)
#define PMCR_NA		BIT(8)
#define PMCR_DP		BIT(5)
#define PMCR_X		BIT(4)
#define PMCR_D		BIT(3)
#define PMCR_C		BIT(2)
#define PMCR_P		BIT(1)
#define PMCR_E		BIT(0)

/* Each SET/CLR register supports up to 32 counters. */
#define ARM_CSPMU_SET_CLR_COUNTER_SHIFT		5
#define ARM_CSPMU_SET_CLR_COUNTER_NUM		\
	(1 << ARM_CSPMU_SET_CLR_COUNTER_SHIFT)

/* Convert counter idx into SET/CLR register number. */
#define COUNTER_TO_SET_CLR_ID(idx)			\
	(idx >> ARM_CSPMU_SET_CLR_COUNTER_SHIFT)

/* Convert counter idx into SET/CLR register bit. */
#define COUNTER_TO_SET_CLR_BIT(idx)			\
	(idx & (ARM_CSPMU_SET_CLR_COUNTER_NUM - 1))
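
/*
 * Worked example: counter idx 37 maps to SET/CLR register 1 (37 >> 5) and
 * bit 5 (37 & 31), i.e. the driver writes BIT(5) to the second SET/CLR
 * register in the bank.
 */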

#define ARM_CSPMU_ACTIVE_CPU_MASK		0x0
#define ARM_CSPMU_ASSOCIATED_CPU_MASK		0x1

/* Check and use default if implementer doesn't provide attribute callback */
#define CHECK_DEFAULT_IMPL_OPS(ops, callback)			\
	do {							\
		if (!ops->callback)				\
			ops->callback = arm_cspmu_ ## callback;	\
	} while (0)

/*
 * Maximum poll count for reading counter value using high-low-high sequence.
 */
#define HILOHI_MAX_POLL	1000

/* JEDEC-assigned JEP106 identification code */
#define ARM_CSPMU_IMPL_ID_NVIDIA	0x36B

static unsigned long arm_cspmu_cpuhp_state;

static struct acpi_apmt_node *arm_cspmu_apmt_node(struct device *dev)
{
	return *(struct acpi_apmt_node **)dev_get_platdata(dev);
}

/*
 * In the CoreSight PMU architecture, all of the MMIO registers are 32-bit
 * except the counter register. The counter register can be implemented as a
 * 32-bit or 64-bit register depending on the value of the PMCFGR.SIZE field.
 * For 64-bit access, single-copy 64-bit atomic support is implementation
 * defined. An APMT node flag is used to identify whether the PMU supports
 * 64-bit single-copy atomic access. If it does not, the driver treats the
 * register as a pair of 32-bit registers.
 */

/*
 * Read 64-bit register as a pair of 32-bit registers using hi-lo-hi sequence.
 */
static u64 read_reg64_hilohi(const void __iomem *addr, u32 max_poll_count)
{
	u32 val_lo, val_hi;
	u64 val;

	/* Use high-low-high sequence to avoid tearing */
	do {
		if (max_poll_count-- == 0) {
			pr_err("ARM CSPMU: timeout hi-low-high sequence\n");
			return 0;
		}

		val_hi = readl(addr + 4);
		val_lo = readl(addr);
	} while (val_hi != readl(addr + 4));

	val = (((u64)val_hi << 32) | val_lo);

	return val;
}
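
/*
 * Note on the sequence above: if the two reads of the high word match, the
 * low word was sampled while the high word was stable, so the combined
 * 64-bit value cannot be a torn mix of two different counter values.
 */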

/* Check if cycle counter is supported. */
static inline bool supports_cycle_counter(const struct arm_cspmu *cspmu)
{
	return (cspmu->pmcfgr & PMCFGR_CC);
}

/* Get counter size, which is (PMCFGR_SIZE + 1). */
static inline u32 counter_size(const struct arm_cspmu *cspmu)
{
	return FIELD_GET(PMCFGR_SIZE, cspmu->pmcfgr) + 1;
}

/* Get counter mask. */
static inline u64 counter_mask(const struct arm_cspmu *cspmu)
{
	return GENMASK_ULL(counter_size(cspmu) - 1, 0);
}

/* Check if counter is implemented as 64-bit register. */
static inline bool use_64b_counter_reg(const struct arm_cspmu *cspmu)
{
	return (counter_size(cspmu) > 32);
}
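
/*
 * Example: PMCFGR.SIZE = 0x1F encodes a 32-bit counter, so counter_size()
 * returns 32, counter_mask() returns GENMASK_ULL(31, 0) and
 * use_64b_counter_reg() is false; SIZE = 0x3F encodes a 64-bit counter.
 */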

ssize_t arm_cspmu_sysfs_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, typeof(*pmu_attr), attr);
	return sysfs_emit(buf, "event=0x%llx\n", pmu_attr->id);
}
EXPORT_SYMBOL_GPL(arm_cspmu_sysfs_event_show);

/* Default event list. */
static struct attribute *arm_cspmu_event_attrs[] = {
	ARM_CSPMU_EVENT_ATTR(cycles, ARM_CSPMU_EVT_CYCLES_DEFAULT),
	NULL,
};

static struct attribute **
arm_cspmu_get_event_attrs(const struct arm_cspmu *cspmu)
{
	struct attribute **attrs;

	attrs = devm_kmemdup(cspmu->dev, arm_cspmu_event_attrs,
			     sizeof(arm_cspmu_event_attrs), GFP_KERNEL);

	return attrs;
}

static umode_t
arm_cspmu_event_attr_is_visible(struct kobject *kobj,
				struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct arm_cspmu *cspmu = to_arm_cspmu(dev_get_drvdata(dev));
	struct perf_pmu_events_attr *eattr;

	eattr = container_of(attr, typeof(*eattr), attr.attr);

	/* Hide cycle event if not supported */
	if (!supports_cycle_counter(cspmu) &&
	    eattr->id == ARM_CSPMU_EVT_CYCLES_DEFAULT)
		return 0;

	return attr->mode;
}

ssize_t arm_cspmu_sysfs_format_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct dev_ext_attribute *eattr =
		container_of(attr, struct dev_ext_attribute, attr);
	return sysfs_emit(buf, "%s\n", (char *)eattr->var);
}
EXPORT_SYMBOL_GPL(arm_cspmu_sysfs_format_show);

static struct attribute *arm_cspmu_format_attrs[] = {
	ARM_CSPMU_FORMAT_EVENT_ATTR,
	ARM_CSPMU_FORMAT_FILTER_ATTR,
	NULL,
};

static struct attribute **
arm_cspmu_get_format_attrs(const struct arm_cspmu *cspmu)
{
	struct attribute **attrs;

	attrs = devm_kmemdup(cspmu->dev, arm_cspmu_format_attrs,
			     sizeof(arm_cspmu_format_attrs), GFP_KERNEL);

	return attrs;
}

static u32 arm_cspmu_event_type(const struct perf_event *event)
{
	return event->attr.config & ARM_CSPMU_EVENT_MASK;
}

static bool arm_cspmu_is_cycle_counter_event(const struct perf_event *event)
{
	return (event->attr.config == ARM_CSPMU_EVT_CYCLES_DEFAULT);
}

static u32 arm_cspmu_event_filter(const struct perf_event *event)
{
	return event->attr.config1 & ARM_CSPMU_FILTER_MASK;
}

static ssize_t arm_cspmu_identifier_show(struct device *dev,
					 struct device_attribute *attr,
					 char *page)
{
	struct arm_cspmu *cspmu = to_arm_cspmu(dev_get_drvdata(dev));

	return sysfs_emit(page, "%s\n", cspmu->identifier);
}

static struct device_attribute arm_cspmu_identifier_attr =
	__ATTR(identifier, 0444, arm_cspmu_identifier_show, NULL);

static struct attribute *arm_cspmu_identifier_attrs[] = {
	&arm_cspmu_identifier_attr.attr,
	NULL,
};

static struct attribute_group arm_cspmu_identifier_attr_group = {
	.attrs = arm_cspmu_identifier_attrs,
};

static const char *arm_cspmu_get_identifier(const struct arm_cspmu *cspmu)
{
	const char *identifier =
		devm_kasprintf(cspmu->dev, GFP_KERNEL, "%x",
			       cspmu->impl.pmiidr);
	return identifier;
}

static const char *arm_cspmu_type_str[ACPI_APMT_NODE_TYPE_COUNT] = {
	"mc",
	"smmu",
	"pcie",
	"acpi",
	"cache",
};

static const char *arm_cspmu_get_name(const struct arm_cspmu *cspmu)
{
	struct device *dev;
	struct acpi_apmt_node *apmt_node;
	u8 pmu_type;
	char *name;
	char acpi_hid_string[ACPI_ID_LEN] = { 0 };
	static atomic_t pmu_idx[ACPI_APMT_NODE_TYPE_COUNT] = { 0 };

	dev = cspmu->dev;
	apmt_node = arm_cspmu_apmt_node(dev);
	pmu_type = apmt_node->type;

	if (pmu_type >= ACPI_APMT_NODE_TYPE_COUNT) {
		dev_err(dev, "unsupported PMU type-%u\n", pmu_type);
		return NULL;
	}

	if (pmu_type == ACPI_APMT_NODE_TYPE_ACPI) {
		memcpy(acpi_hid_string,
		       &apmt_node->inst_primary,
		       sizeof(apmt_node->inst_primary));
		name = devm_kasprintf(dev, GFP_KERNEL, "%s_%s_%s_%u", PMUNAME,
				      arm_cspmu_type_str[pmu_type],
				      acpi_hid_string,
				      apmt_node->inst_secondary);
	} else {
		name = devm_kasprintf(dev, GFP_KERNEL, "%s_%s_%d", PMUNAME,
				      arm_cspmu_type_str[pmu_type],
				      atomic_fetch_inc(&pmu_idx[pmu_type]));
	}

	return name;
}
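
/*
 * Illustrative examples of the resulting names: the first memory-controller
 * PMU instance enumerates as "arm_cspmu_mc_0"; an ACPI-type node whose
 * primary instance holds a HID such as "XXXX0001" (hypothetical) with
 * secondary instance 1 enumerates as "arm_cspmu_acpi_XXXX0001_1".
 */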

static ssize_t arm_cspmu_cpumask_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct pmu *pmu = dev_get_drvdata(dev);
	struct arm_cspmu *cspmu = to_arm_cspmu(pmu);
	struct dev_ext_attribute *eattr =
		container_of(attr, struct dev_ext_attribute, attr);
	unsigned long mask_id = (unsigned long)eattr->var;
	const cpumask_t *cpumask;

	switch (mask_id) {
	case ARM_CSPMU_ACTIVE_CPU_MASK:
		cpumask = &cspmu->active_cpu;
		break;
	case ARM_CSPMU_ASSOCIATED_CPU_MASK:
		cpumask = &cspmu->associated_cpus;
		break;
	default:
		return 0;
	}
	return cpumap_print_to_pagebuf(true, buf, cpumask);
}

static struct attribute *arm_cspmu_cpumask_attrs[] = {
	ARM_CSPMU_CPUMASK_ATTR(cpumask, ARM_CSPMU_ACTIVE_CPU_MASK),
	ARM_CSPMU_CPUMASK_ATTR(associated_cpus, ARM_CSPMU_ASSOCIATED_CPU_MASK),
	NULL,
};

static struct attribute_group arm_cspmu_cpumask_attr_group = {
	.attrs = arm_cspmu_cpumask_attrs,
};

struct impl_match {
	u32 pmiidr;
	u32 mask;
	int (*impl_init_ops)(struct arm_cspmu *cspmu);
};

static const struct impl_match impl_match[] = {
	{
		.pmiidr = ARM_CSPMU_IMPL_ID_NVIDIA,
		.mask = ARM_CSPMU_PMIIDR_IMPLEMENTER,
		.impl_init_ops = nv_cspmu_init_ops
	},
	{}
};

static int arm_cspmu_init_impl_ops(struct arm_cspmu *cspmu)
{
	int ret;
	struct arm_cspmu_impl_ops *impl_ops = &cspmu->impl.ops;
	struct acpi_apmt_node *apmt_node = arm_cspmu_apmt_node(cspmu->dev);
	const struct impl_match *match = impl_match;

	/*
	 * Get the PMU implementer and product id from the APMT node.
	 * If the APMT node doesn't have the implementer/product id, try to
	 * get it from PMIIDR.
	 */
	cspmu->impl.pmiidr =
		(apmt_node->impl_id) ? apmt_node->impl_id :
				       readl(cspmu->base0 + PMIIDR);

	/* Find implementer specific attribute ops. */
	for (; match->pmiidr; match++) {
		const u32 mask = match->mask;

		if ((match->pmiidr & mask) == (cspmu->impl.pmiidr & mask)) {
			ret = match->impl_init_ops(cspmu);
			if (ret)
				return ret;

			break;
		}
	}

	/* Use default callbacks if implementer doesn't provide one. */
	CHECK_DEFAULT_IMPL_OPS(impl_ops, get_event_attrs);
	CHECK_DEFAULT_IMPL_OPS(impl_ops, get_format_attrs);
	CHECK_DEFAULT_IMPL_OPS(impl_ops, get_identifier);
	CHECK_DEFAULT_IMPL_OPS(impl_ops, get_name);
	CHECK_DEFAULT_IMPL_OPS(impl_ops, is_cycle_counter_event);
	CHECK_DEFAULT_IMPL_OPS(impl_ops, event_type);
	CHECK_DEFAULT_IMPL_OPS(impl_ops, event_filter);
	CHECK_DEFAULT_IMPL_OPS(impl_ops, event_attr_is_visible);

	return 0;
}

static struct attribute_group *
arm_cspmu_alloc_event_attr_group(struct arm_cspmu *cspmu)
{
	struct attribute_group *event_group;
	struct device *dev = cspmu->dev;
	const struct arm_cspmu_impl_ops *impl_ops = &cspmu->impl.ops;

	event_group =
		devm_kzalloc(dev, sizeof(struct attribute_group), GFP_KERNEL);
	if (!event_group)
		return NULL;

	event_group->name = "events";
	event_group->is_visible = impl_ops->event_attr_is_visible;
	event_group->attrs = impl_ops->get_event_attrs(cspmu);

	if (!event_group->attrs)
		return NULL;

	return event_group;
}

static struct attribute_group *
arm_cspmu_alloc_format_attr_group(struct arm_cspmu *cspmu)
{
	struct attribute_group *format_group;
	struct device *dev = cspmu->dev;

	format_group =
		devm_kzalloc(dev, sizeof(struct attribute_group), GFP_KERNEL);
	if (!format_group)
		return NULL;

	format_group->name = "format";
	format_group->attrs = cspmu->impl.ops.get_format_attrs(cspmu);

	if (!format_group->attrs)
		return NULL;

	return format_group;
}

static struct attribute_group **
arm_cspmu_alloc_attr_group(struct arm_cspmu *cspmu)
{
	struct attribute_group **attr_groups = NULL;
	struct device *dev = cspmu->dev;
	const struct arm_cspmu_impl_ops *impl_ops = &cspmu->impl.ops;
	int ret;

	ret = arm_cspmu_init_impl_ops(cspmu);
	if (ret)
		return NULL;

	cspmu->identifier = impl_ops->get_identifier(cspmu);
	cspmu->name = impl_ops->get_name(cspmu);

	if (!cspmu->identifier || !cspmu->name)
		return NULL;

	attr_groups = devm_kcalloc(dev, 5, sizeof(struct attribute_group *),
				   GFP_KERNEL);
	if (!attr_groups)
		return NULL;

	attr_groups[0] = arm_cspmu_alloc_event_attr_group(cspmu);
	attr_groups[1] = arm_cspmu_alloc_format_attr_group(cspmu);
	attr_groups[2] = &arm_cspmu_identifier_attr_group;
	attr_groups[3] = &arm_cspmu_cpumask_attr_group;

	if (!attr_groups[0] || !attr_groups[1])
		return NULL;

	return attr_groups;
}

static inline void arm_cspmu_reset_counters(struct arm_cspmu *cspmu)
{
	u32 pmcr = 0;

	/* Reset the event counters (P) and the cycle counter (C). */
	pmcr |= PMCR_P;
	pmcr |= PMCR_C;
	writel(pmcr, cspmu->base0 + PMCR);
}

static inline void arm_cspmu_start_counters(struct arm_cspmu *cspmu)
{
	writel(PMCR_E, cspmu->base0 + PMCR);
}

static inline void arm_cspmu_stop_counters(struct arm_cspmu *cspmu)
{
	writel(0, cspmu->base0 + PMCR);
}

static void arm_cspmu_enable(struct pmu *pmu)
{
	bool disabled;
	struct arm_cspmu *cspmu = to_arm_cspmu(pmu);

	disabled = bitmap_empty(cspmu->hw_events.used_ctrs,
				cspmu->num_logical_ctrs);

	if (disabled)
		return;

	arm_cspmu_start_counters(cspmu);
}

static void arm_cspmu_disable(struct pmu *pmu)
{
	struct arm_cspmu *cspmu = to_arm_cspmu(pmu);

	arm_cspmu_stop_counters(cspmu);
}

static int arm_cspmu_get_event_idx(struct arm_cspmu_hw_events *hw_events,
				   struct perf_event *event)
{
	int idx;
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);

	if (supports_cycle_counter(cspmu)) {
		if (cspmu->impl.ops.is_cycle_counter_event(event)) {
			/* Search for available cycle counter. */
			if (test_and_set_bit(cspmu->cycle_counter_logical_idx,
					     hw_events->used_ctrs))
				return -EAGAIN;

			return cspmu->cycle_counter_logical_idx;
		}

		/*
		 * Search a regular counter from the used counter bitmap.
		 * The cycle counter divides the bitmap into two parts. Search
		 * the first then second half to exclude the cycle counter bit.
		 */
		idx = find_first_zero_bit(hw_events->used_ctrs,
					  cspmu->cycle_counter_logical_idx);
		if (idx >= cspmu->cycle_counter_logical_idx) {
			idx = find_next_zero_bit(
				hw_events->used_ctrs,
				cspmu->num_logical_ctrs,
				cspmu->cycle_counter_logical_idx + 1);
		}
	} else {
		idx = find_first_zero_bit(hw_events->used_ctrs,
					  cspmu->num_logical_ctrs);
	}

	if (idx >= cspmu->num_logical_ctrs)
		return -EAGAIN;

	set_bit(idx, hw_events->used_ctrs);

	return idx;
}
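
/*
 * Example: with 32 logical counters and the cycle counter at logical idx 31,
 * a "cycles" event can only claim idx 31, while regular events are allocated
 * from idx 0..30; the two-part search above skips the cycle counter slot.
 */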

static bool arm_cspmu_validate_event(struct pmu *pmu,
				     struct arm_cspmu_hw_events *hw_events,
				     struct perf_event *event)
{
	if (is_software_event(event))
		return true;

	/* Reject groups spanning multiple HW PMUs. */
	if (event->pmu != pmu)
		return false;

	return (arm_cspmu_get_event_idx(hw_events, event) >= 0);
}

/*
 * Make sure the group of events can be scheduled at once
 * on the PMU.
 */
static bool arm_cspmu_validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct arm_cspmu_hw_events fake_hw_events;

	if (event->group_leader == event)
		return true;

	memset(&fake_hw_events, 0, sizeof(fake_hw_events));

	if (!arm_cspmu_validate_event(event->pmu, &fake_hw_events, leader))
		return false;

	for_each_sibling_event(sibling, leader) {
		if (!arm_cspmu_validate_event(event->pmu, &fake_hw_events,
					      sibling))
			return false;
	}

	return arm_cspmu_validate_event(event->pmu, &fake_hw_events, event);
}

static int arm_cspmu_event_init(struct perf_event *event)
{
	struct arm_cspmu *cspmu;
	struct hw_perf_event *hwc = &event->hw;

	cspmu = to_arm_cspmu(event->pmu);

	/*
	 * Following other "uncore" PMUs, we do not support sampling mode or
	 * attaching to a task (per-process mode).
	 */
	if (is_sampling_event(event)) {
		dev_dbg(cspmu->pmu.dev,
			"Can't support sampling events\n");
		return -EOPNOTSUPP;
	}

	if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK) {
		dev_dbg(cspmu->pmu.dev,
			"Can't support per-task counters\n");
		return -EINVAL;
	}

	/*
	 * Make sure the CPU assignment is on one of the CPUs associated with
	 * this PMU.
	 */
	if (!cpumask_test_cpu(event->cpu, &cspmu->associated_cpus)) {
		dev_dbg(cspmu->pmu.dev,
			"Requested cpu is not associated with the PMU\n");
		return -EINVAL;
	}

	/* Enforce the current active CPU to handle the events in this PMU. */
	event->cpu = cpumask_first(&cspmu->active_cpu);
	if (event->cpu >= nr_cpu_ids)
		return -EINVAL;

	if (!arm_cspmu_validate_group(event))
		return -EINVAL;

	/*
	 * The logical counter id is tracked with hw_perf_event.extra_reg.idx.
	 * The physical counter id is tracked with hw_perf_event.idx.
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet.
	 */
	hwc->idx = -1;
	hwc->extra_reg.idx = -1;
	hwc->config = cspmu->impl.ops.event_type(event);

	return 0;
}

static inline u32 counter_offset(u32 reg_sz, u32 ctr_idx)
{
	return (PMEVCNTR_LO + (reg_sz * ctr_idx));
}

static void arm_cspmu_write_counter(struct perf_event *event, u64 val)
{
	u32 offset;
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);

	if (use_64b_counter_reg(cspmu)) {
		offset = counter_offset(sizeof(u64), event->hw.idx);

		writeq(val, cspmu->base1 + offset);
	} else {
		offset = counter_offset(sizeof(u32), event->hw.idx);

		writel(lower_32_bits(val), cspmu->base1 + offset);
	}
}

static u64 arm_cspmu_read_counter(struct perf_event *event)
{
	u32 offset;
	const void __iomem *counter_addr;
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);

	if (use_64b_counter_reg(cspmu)) {
		offset = counter_offset(sizeof(u64), event->hw.idx);
		counter_addr = cspmu->base1 + offset;

		return cspmu->has_atomic_dword ?
			       readq(counter_addr) :
			       read_reg64_hilohi(counter_addr, HILOHI_MAX_POLL);
	}

	offset = counter_offset(sizeof(u32), event->hw.idx);
	return readl(cspmu->base1 + offset);
}

/*
 * arm_cspmu_set_event_period: Set the period for the counter.
 *
 * To handle cases of extreme interrupt latency, we program
 * the counter with half of the max count for the counters.
 */
static void arm_cspmu_set_event_period(struct perf_event *event)
{
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
	u64 val = counter_mask(cspmu) >> 1ULL;

	local64_set(&event->hw.prev_count, val);
	arm_cspmu_write_counter(event, val);
}
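
/*
 * Example: for a 32-bit counter the mask is 0xFFFFFFFF, so the counter is
 * programmed to 0x7FFFFFFF and roughly 2^31 events elapse before the
 * overflow interrupt fires, leaving ample headroom to service it.
 */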

static void arm_cspmu_enable_counter(struct arm_cspmu *cspmu, int idx)
{
	u32 reg_id, reg_bit, inten_off, cnten_off;

	reg_id = COUNTER_TO_SET_CLR_ID(idx);
	reg_bit = COUNTER_TO_SET_CLR_BIT(idx);

	inten_off = PMINTENSET + (4 * reg_id);
	cnten_off = PMCNTENSET + (4 * reg_id);

	writel(BIT(reg_bit), cspmu->base0 + inten_off);
	writel(BIT(reg_bit), cspmu->base0 + cnten_off);
}

static void arm_cspmu_disable_counter(struct arm_cspmu *cspmu, int idx)
{
	u32 reg_id, reg_bit, inten_off, cnten_off;

	reg_id = COUNTER_TO_SET_CLR_ID(idx);
	reg_bit = COUNTER_TO_SET_CLR_BIT(idx);

	inten_off = PMINTENCLR + (4 * reg_id);
	cnten_off = PMCNTENCLR + (4 * reg_id);

	writel(BIT(reg_bit), cspmu->base0 + cnten_off);
	writel(BIT(reg_bit), cspmu->base0 + inten_off);
}

static void arm_cspmu_event_update(struct perf_event *event)
{
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev, now;

	do {
		prev = local64_read(&hwc->prev_count);
		now = arm_cspmu_read_counter(event);
	} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);

	/* Masking the delta handles wraparound of sub-64-bit counters. */
	delta = (now - prev) & counter_mask(cspmu);
	local64_add(delta, &event->count);
}

static inline void arm_cspmu_set_event(struct arm_cspmu *cspmu,
				       struct hw_perf_event *hwc)
{
	u32 offset = PMEVTYPER + (4 * hwc->idx);

	writel(hwc->config, cspmu->base0 + offset);
}

static inline void arm_cspmu_set_ev_filter(struct arm_cspmu *cspmu,
					   struct hw_perf_event *hwc,
					   u32 filter)
{
	u32 offset = PMEVFILTR + (4 * hwc->idx);

	writel(filter, cspmu->base0 + offset);
}

static inline void arm_cspmu_set_cc_filter(struct arm_cspmu *cspmu, u32 filter)
{
	u32 offset = PMCCFILTR;

	writel(filter, cspmu->base0 + offset);
}

static void arm_cspmu_start(struct perf_event *event, int pmu_flags)
{
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u32 filter;

	/* We always reprogram the counter */
	if (pmu_flags & PERF_EF_RELOAD)
		WARN_ON(!(hwc->state & PERF_HES_UPTODATE));

	arm_cspmu_set_event_period(event);

	filter = cspmu->impl.ops.event_filter(event);

	if (event->hw.extra_reg.idx == cspmu->cycle_counter_logical_idx) {
		arm_cspmu_set_cc_filter(cspmu, filter);
	} else {
		arm_cspmu_set_event(cspmu, hwc);
		arm_cspmu_set_ev_filter(cspmu, hwc, filter);
	}

	hwc->state = 0;

	arm_cspmu_enable_counter(cspmu, hwc->idx);
}

static void arm_cspmu_stop(struct perf_event *event, int pmu_flags)
{
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->state & PERF_HES_STOPPED)
		return;

	arm_cspmu_disable_counter(cspmu, hwc->idx);
	arm_cspmu_event_update(event);

	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

static inline u32 to_phys_idx(struct arm_cspmu *cspmu, u32 idx)
{
	return (idx == cspmu->cycle_counter_logical_idx) ?
		ARM_CSPMU_CYCLE_CNTR_IDX : idx;
}

static int arm_cspmu_add(struct perf_event *event, int flags)
{
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
	struct arm_cspmu_hw_events *hw_events = &cspmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	if (WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(),
					   &cspmu->associated_cpus)))
		return -ENOENT;

	idx = arm_cspmu_get_event_idx(hw_events, event);
	if (idx < 0)
		return idx;

	hw_events->events[idx] = event;
	hwc->idx = to_phys_idx(cspmu, idx);
	hwc->extra_reg.idx = idx;
	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;

	if (flags & PERF_EF_START)
		arm_cspmu_start(event, PERF_EF_RELOAD);

	/* Propagate changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}

static void arm_cspmu_del(struct perf_event *event, int flags)
{
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
	struct arm_cspmu_hw_events *hw_events = &cspmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->extra_reg.idx;

	arm_cspmu_stop(event, PERF_EF_UPDATE);

	hw_events->events[idx] = NULL;

	clear_bit(idx, hw_events->used_ctrs);

	perf_event_update_userpage(event);
}

static void arm_cspmu_read(struct perf_event *event)
{
	arm_cspmu_event_update(event);
}

static struct arm_cspmu *arm_cspmu_alloc(struct platform_device *pdev)
{
	struct acpi_apmt_node *apmt_node;
	struct arm_cspmu *cspmu;
	struct device *dev = &pdev->dev;

	cspmu = devm_kzalloc(dev, sizeof(*cspmu), GFP_KERNEL);
	if (!cspmu)
		return NULL;

	cspmu->dev = dev;
	platform_set_drvdata(pdev, cspmu);

	apmt_node = arm_cspmu_apmt_node(dev);
	cspmu->has_atomic_dword = apmt_node->flags & ACPI_APMT_FLAGS_ATOMIC;

	return cspmu;
}

static int arm_cspmu_init_mmio(struct arm_cspmu *cspmu)
{
	struct device *dev;
	struct platform_device *pdev;

	dev = cspmu->dev;
	pdev = to_platform_device(dev);

	/* Base address for page 0. */
	cspmu->base0 = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(cspmu->base0)) {
		dev_err(dev, "ioremap failed for page-0 resource\n");
		return PTR_ERR(cspmu->base0);
	}

	/* Base address for page 1 if supported. Otherwise point to page 0. */
	cspmu->base1 = cspmu->base0;
	if (platform_get_resource(pdev, IORESOURCE_MEM, 1)) {
		cspmu->base1 = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(cspmu->base1)) {
			dev_err(dev, "ioremap failed for page-1 resource\n");
			return PTR_ERR(cspmu->base1);
		}
	}

	cspmu->pmcfgr = readl(cspmu->base0 + PMCFGR);

	cspmu->num_logical_ctrs = FIELD_GET(PMCFGR_N, cspmu->pmcfgr) + 1;

	cspmu->cycle_counter_logical_idx = ARM_CSPMU_MAX_HW_CNTRS;

	if (supports_cycle_counter(cspmu)) {
		/*
		 * The last logical counter is mapped to cycle counter if
		 * there is a gap between regular and cycle counter. Otherwise,
		 * logical and physical have 1-to-1 mapping.
		 */
		cspmu->cycle_counter_logical_idx =
			(cspmu->num_logical_ctrs <= ARM_CSPMU_CYCLE_CNTR_IDX) ?
				cspmu->num_logical_ctrs - 1 :
				ARM_CSPMU_CYCLE_CNTR_IDX;
	}
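
	/*
	 * Example, assuming ARM_CSPMU_CYCLE_CNTR_IDX in arm_cspmu.h is 31
	 * (the fixed physical cycle counter slot): a PMU with 9 logical
	 * counters maps the cycle counter to logical idx 8 (gap case), while
	 * a PMU with the full 32 maps it to logical idx 31 (1-to-1 case).
	 */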

	cspmu->num_set_clr_reg =
		DIV_ROUND_UP(cspmu->num_logical_ctrs,
			     ARM_CSPMU_SET_CLR_COUNTER_NUM);

	cspmu->hw_events.events =
		devm_kcalloc(dev, cspmu->num_logical_ctrs,
			     sizeof(*cspmu->hw_events.events), GFP_KERNEL);

	if (!cspmu->hw_events.events)
		return -ENOMEM;

	return 0;
}

static inline int arm_cspmu_get_reset_overflow(struct arm_cspmu *cspmu,
					       u32 *pmovs)
{
	int i;
	u32 pmovclr_offset = PMOVSCLR;
	u32 has_overflowed = 0;

	for (i = 0; i < cspmu->num_set_clr_reg; ++i) {
		pmovs[i] = readl(cspmu->base1 + pmovclr_offset);
		has_overflowed |= pmovs[i];
		writel(pmovs[i], cspmu->base1 + pmovclr_offset);
		pmovclr_offset += sizeof(u32);
	}

	return has_overflowed != 0;
}

static irqreturn_t arm_cspmu_handle_irq(int irq_num, void *dev)
{
	int idx, has_overflowed;
	struct perf_event *event;
	struct arm_cspmu *cspmu = dev;
	DECLARE_BITMAP(pmovs, ARM_CSPMU_MAX_HW_CNTRS);
	bool handled = false;

	arm_cspmu_stop_counters(cspmu);

	has_overflowed = arm_cspmu_get_reset_overflow(cspmu, (u32 *)pmovs);
	if (!has_overflowed)
		goto done;

	for_each_set_bit(idx, cspmu->hw_events.used_ctrs,
			 cspmu->num_logical_ctrs) {
		event = cspmu->hw_events.events[idx];

		if (!event)
			continue;

		if (!test_bit(event->hw.idx, pmovs))
			continue;

		arm_cspmu_event_update(event);
		arm_cspmu_set_event_period(event);

		handled = true;
	}

done:
	arm_cspmu_start_counters(cspmu);
	return IRQ_RETVAL(handled);
}

static int arm_cspmu_request_irq(struct arm_cspmu *cspmu)
{
	int irq, ret;
	struct device *dev;
	struct platform_device *pdev;

	dev = cspmu->dev;
	pdev = to_platform_device(dev);

	/* Skip IRQ request if the PMU does not support overflow interrupt. */
	irq = platform_get_irq_optional(pdev, 0);
	if (irq < 0)
		return irq == -ENXIO ? 0 : irq;

	ret = devm_request_irq(dev, irq, arm_cspmu_handle_irq,
			       IRQF_NOBALANCING | IRQF_NO_THREAD, dev_name(dev),
			       cspmu);
	if (ret) {
		dev_err(dev, "Could not request IRQ %d\n", irq);
		return ret;
	}

	cspmu->irq = irq;

	return 0;
}

#if defined(CONFIG_ACPI) && defined(CONFIG_ARM64)
#include <acpi/processor.h>

static inline int arm_cspmu_find_cpu_container(int cpu, u32 container_uid)
{
	u32 acpi_uid;
	struct device *cpu_dev;
	struct acpi_device *acpi_dev;

	cpu_dev = get_cpu_device(cpu);
	if (!cpu_dev)
		return -ENODEV;

	acpi_dev = ACPI_COMPANION(cpu_dev);
	while (acpi_dev) {
		if (!strcmp(acpi_device_hid(acpi_dev),
			    ACPI_PROCESSOR_CONTAINER_HID) &&
		    !kstrtouint(acpi_device_uid(acpi_dev), 0, &acpi_uid) &&
		    acpi_uid == container_uid)
			return 0;

		acpi_dev = acpi_dev_parent(acpi_dev);
	}

	return -ENODEV;
}

static int arm_cspmu_acpi_get_cpus(struct arm_cspmu *cspmu)
{
	struct acpi_apmt_node *apmt_node;
	int affinity_flag;
	int cpu;

	apmt_node = arm_cspmu_apmt_node(cspmu->dev);
	affinity_flag = apmt_node->flags & ACPI_APMT_FLAGS_AFFINITY;

	if (affinity_flag == ACPI_APMT_FLAGS_AFFINITY_PROC) {
		for_each_possible_cpu(cpu) {
			if (apmt_node->proc_affinity ==
			    get_acpi_id_for_cpu(cpu)) {
				cpumask_set_cpu(cpu, &cspmu->associated_cpus);
				break;
			}
		}
	} else {
		for_each_possible_cpu(cpu) {
			if (arm_cspmu_find_cpu_container(
				    cpu, apmt_node->proc_affinity))
				continue;

			cpumask_set_cpu(cpu, &cspmu->associated_cpus);
		}
	}

	if (cpumask_empty(&cspmu->associated_cpus)) {
		dev_dbg(cspmu->dev, "No cpu associated with the PMU\n");
		return -ENODEV;
	}

	return 0;
}
#else
static int arm_cspmu_acpi_get_cpus(struct arm_cspmu *cspmu)
{
	return -ENODEV;
}
#endif

static int arm_cspmu_get_cpus(struct arm_cspmu *cspmu)
{
	return arm_cspmu_acpi_get_cpus(cspmu);
}

static int arm_cspmu_register_pmu(struct arm_cspmu *cspmu)
{
	int ret, capabilities;
	struct attribute_group **attr_groups;

	attr_groups = arm_cspmu_alloc_attr_group(cspmu);
	if (!attr_groups)
		return -ENOMEM;

	ret = cpuhp_state_add_instance(arm_cspmu_cpuhp_state,
				       &cspmu->cpuhp_node);
	if (ret)
		return ret;

	capabilities = PERF_PMU_CAP_NO_EXCLUDE;
	if (cspmu->irq == 0)
		capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

	cspmu->pmu = (struct pmu){
		.task_ctx_nr = perf_invalid_context,
		.module = THIS_MODULE,
		.pmu_enable = arm_cspmu_enable,
		.pmu_disable = arm_cspmu_disable,
		.event_init = arm_cspmu_event_init,
		.add = arm_cspmu_add,
		.del = arm_cspmu_del,
		.start = arm_cspmu_start,
		.stop = arm_cspmu_stop,
		.read = arm_cspmu_read,
		.attr_groups = (const struct attribute_group **)attr_groups,
		.capabilities = capabilities,
	};

	/* Hardware counter init */
	arm_cspmu_stop_counters(cspmu);
	arm_cspmu_reset_counters(cspmu);

	ret = perf_pmu_register(&cspmu->pmu, cspmu->name, -1);
	if (ret) {
		cpuhp_state_remove_instance(arm_cspmu_cpuhp_state,
					    &cspmu->cpuhp_node);
	}

	return ret;
}

static int arm_cspmu_device_probe(struct platform_device *pdev)
{
	int ret;
	struct arm_cspmu *cspmu;

	cspmu = arm_cspmu_alloc(pdev);
	if (!cspmu)
		return -ENOMEM;

	ret = arm_cspmu_init_mmio(cspmu);
	if (ret)
		return ret;

	ret = arm_cspmu_request_irq(cspmu);
	if (ret)
		return ret;

	ret = arm_cspmu_get_cpus(cspmu);
	if (ret)
		return ret;

	ret = arm_cspmu_register_pmu(cspmu);
	if (ret)
		return ret;

	return 0;
}

static int arm_cspmu_device_remove(struct platform_device *pdev)
{
	struct arm_cspmu *cspmu = platform_get_drvdata(pdev);

	perf_pmu_unregister(&cspmu->pmu);
	cpuhp_state_remove_instance(arm_cspmu_cpuhp_state, &cspmu->cpuhp_node);

	return 0;
}

static const struct platform_device_id arm_cspmu_id[] = {
	{DRVNAME, 0},
	{ },
};
MODULE_DEVICE_TABLE(platform, arm_cspmu_id);

static struct platform_driver arm_cspmu_driver = {
	.driver = {
		.name = DRVNAME,
		.suppress_bind_attrs = true,
	},
	.probe = arm_cspmu_device_probe,
	.remove = arm_cspmu_device_remove,
	.id_table = arm_cspmu_id,
};

static void arm_cspmu_set_active_cpu(int cpu, struct arm_cspmu *cspmu)
{
	cpumask_set_cpu(cpu, &cspmu->active_cpu);

	WARN_ON(irq_set_affinity(cspmu->irq, &cspmu->active_cpu));
}

static int arm_cspmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct arm_cspmu *cspmu =
		hlist_entry_safe(node, struct arm_cspmu, cpuhp_node);

	if (!cpumask_test_cpu(cpu, &cspmu->associated_cpus))
		return 0;

	/* If the PMU is already managed, there is nothing to do */
	if (!cpumask_empty(&cspmu->active_cpu))
		return 0;

	/* Use this CPU for event counting */
	arm_cspmu_set_active_cpu(cpu, cspmu);

	return 0;
}

static int arm_cspmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
{
	int dst;
	struct cpumask online_supported;

	struct arm_cspmu *cspmu =
		hlist_entry_safe(node, struct arm_cspmu, cpuhp_node);

	/* Nothing to do if this CPU doesn't own the PMU */
	if (!cpumask_test_and_clear_cpu(cpu, &cspmu->active_cpu))
		return 0;

	/* Choose a new CPU to migrate ownership of the PMU to */
	cpumask_and(&online_supported, &cspmu->associated_cpus,
		    cpu_online_mask);
	dst = cpumask_any_but(&online_supported, cpu);
	if (dst >= nr_cpu_ids)
		return 0;

	/* Use this CPU for event counting */
	perf_pmu_migrate_context(&cspmu->pmu, cpu, dst);
	arm_cspmu_set_active_cpu(dst, cspmu);

	return 0;
}

static int __init arm_cspmu_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "perf/arm/cspmu:online",
				      arm_cspmu_cpu_online,
				      arm_cspmu_cpu_teardown);
	if (ret < 0)
		return ret;
	arm_cspmu_cpuhp_state = ret;
	return platform_driver_register(&arm_cspmu_driver);
}

static void __exit arm_cspmu_exit(void)
{
	platform_driver_unregister(&arm_cspmu_driver);
	cpuhp_remove_multi_state(arm_cspmu_cpuhp_state);
}

module_init(arm_cspmu_init);
module_exit(arm_cspmu_exit);

MODULE_LICENSE("GPL v2");