/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */
#define pr_fmt(fmt) "CPU PMU: " fmt
#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/cputype.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>
/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *cpu_pmu;

/* Per-CPU PMU state: owning PMU, event pointers and counter usage bitmap. */
static DEFINE_PER_CPU(struct arm_pmu *, percpu_pmu);
static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
/*
 * Despite the names, these two functions are CPU-specific and are used
 * by the OProfile/perf code.
 */
const char *perf_pmu_name(void)
{
	if (!cpu_pmu)
		return NULL;

	return cpu_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
	int max_events = 0;

	if (cpu_pmu != NULL)
		max_events = cpu_pmu->num_events;

	return max_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);
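/*
 * Illustrative only (not from this file): an OProfile-style consumer
 * can size its counter bookkeeping from these helpers, e.g.
 *
 *	pr_info("%s has %d counters\n", perf_pmu_name(),
 *		perf_num_counters());
 */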
/* Include the PMU-specific implementations. */
#include "perf_event_xscale.c"
#include "perf_event_v6.c"
#include "perf_event_v7.c"
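/* Return this CPU's event bookkeeping; installed as ->get_hw_events below. */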
static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
{
	return this_cpu_ptr(&cpu_hw_events);
}

/*
 * These two helpers run on each CPU via on_each_cpu() (i.e. in IPI
 * context) to enable/disable a per-CPU PMU interrupt locally.
 */
static void cpu_pmu_enable_percpu_irq(void *data)
{
	int irq = *(int *)data;

	enable_percpu_irq(irq, IRQ_TYPE_NONE);
}

static void cpu_pmu_disable_percpu_irq(void *data)
{
	int irq = *(int *)data;

	disable_percpu_irq(irq);
}
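/*
 * Release the PMU interrupt(s): either the single per-CPU IRQ, after
 * disabling it on every CPU, or each per-CPU SPI we previously claimed.
 */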
static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
{
	int i, irq, irqs;
	struct platform_device *pmu_device = cpu_pmu->plat_device;

	irqs = min(pmu_device->num_resources, num_possible_cpus());

	irq = platform_get_irq(pmu_device, 0);
	if (irq >= 0 && irq_is_percpu(irq)) {
		on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
		free_percpu_irq(irq, &percpu_pmu);
	} else {
		for (i = 0; i < irqs; ++i) {
			if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
				continue;
			irq = platform_get_irq(pmu_device, i);
			if (irq >= 0)
				free_irq(irq, cpu_pmu);
		}
	}
}
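/*
 * Claim the PMU interrupt(s): a per-CPU IRQ is requested once and then
 * enabled everywhere; otherwise one SPI per CPU is requested with its
 * affinity pinned to that CPU.
 */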
static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
{
	int i, err, irq, irqs;
	struct platform_device *pmu_device = cpu_pmu->plat_device;

	if (!pmu_device)
		return -ENODEV;

	irqs = min(pmu_device->num_resources, num_possible_cpus());
	if (irqs < 1) {
		printk_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
		return 0;
	}

	irq = platform_get_irq(pmu_device, 0);
	if (irq >= 0 && irq_is_percpu(irq)) {
		err = request_percpu_irq(irq, handler, "arm-pmu", &percpu_pmu);
		if (err) {
			pr_err("unable to request IRQ%d for ARM PMU counters\n",
			       irq);
			return err;
		}
		on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
	} else {
		for (i = 0; i < irqs; ++i) {
			irq = platform_get_irq(pmu_device, i);
			if (irq < 0)
				continue;

			/*
			 * If we have a single PMU interrupt that we can't shift,
			 * assume that we're running on a uniprocessor machine and
			 * continue. Otherwise, continue without this interrupt.
			 */
			if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
				pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
					irq, i);
				continue;
			}

			err = request_irq(irq, handler,
					  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
					  cpu_pmu);
			if (err) {
				pr_err("unable to request IRQ%d for ARM PMU counters\n",
				       irq);
				return err;
			}

			cpumask_set_cpu(i, &cpu_pmu->active_irqs);
		}
	}

	return 0;
}
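/*
 * Hook up the per-CPU state and IRQ callbacks, then reset the PMU on
 * every CPU so the counters start from known values.
 */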
static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
		events->events = per_cpu(hw_events, cpu);
		events->used_mask = per_cpu(used_mask, cpu);
		raw_spin_lock_init(&events->pmu_lock);
		per_cpu(percpu_pmu, cpu) = cpu_pmu;
	}

	cpu_pmu->get_hw_events	= cpu_pmu_get_cpu_events;
	cpu_pmu->request_irq	= cpu_pmu_request_irq;
	cpu_pmu->free_irq	= cpu_pmu_free_irq;

	/* Ensure the PMU has sane values out of reset. */
	if (cpu_pmu->reset)
		on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);

	/*
	 * If no interrupt is available, set the corresponding capability
	 * flag. Note that platform_get_irq() returns a negative errno on
	 * failure, so test for irq <= 0 rather than !irq.
	 */
	if (platform_get_irq(cpu_pmu->plat_device, 0) <= 0)
		cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
}
/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * junk values out of them.
 */
static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
			  void *hcpu)
{
	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
		return NOTIFY_DONE;

	if (cpu_pmu && cpu_pmu->reset)
		cpu_pmu->reset(cpu_pmu);
	else
		return NOTIFY_DONE;

	return NOTIFY_OK;
}

static struct notifier_block cpu_pmu_hotplug_notifier = {
	.notifier_call = cpu_pmu_notify,
};
/*
 * PMU platform driver and devicetree bindings.
 */
static struct of_device_id cpu_pmu_of_device_ids[] = {
	{.compatible = "arm,cortex-a17-pmu",	.data = armv7_a17_pmu_init},
	{.compatible = "arm,cortex-a15-pmu",	.data = armv7_a15_pmu_init},
	{.compatible = "arm,cortex-a12-pmu",	.data = armv7_a12_pmu_init},
	{.compatible = "arm,cortex-a9-pmu",	.data = armv7_a9_pmu_init},
	{.compatible = "arm,cortex-a8-pmu",	.data = armv7_a8_pmu_init},
	{.compatible = "arm,cortex-a7-pmu",	.data = armv7_a7_pmu_init},
	{.compatible = "arm,cortex-a5-pmu",	.data = armv7_a5_pmu_init},
	{.compatible = "arm,arm11mpcore-pmu",	.data = armv6mpcore_pmu_init},
	{.compatible = "arm,arm1176-pmu",	.data = armv6_1176_pmu_init},
	{.compatible = "arm,arm1136-pmu",	.data = armv6_1136_pmu_init},
	{.compatible = "qcom,krait-pmu",	.data = krait_pmu_init},
	{},
};
static struct platform_device_id cpu_pmu_plat_device_ids[] = {
	{.name = "arm-pmu"},
	{.name = "armv6-pmu"},
	{.name = "armv7-pmu"},
	{.name = "xscale-pmu"},
	{},
};
/*
 * CPU PMU identification and probing.
 */
static int probe_current_pmu(struct arm_pmu *pmu)
{
	int cpu = get_cpu();
	int ret = -ENODEV;

	pr_info("probing PMU on CPU %d\n", cpu);

	/* ARM Ltd and Intel XScale CPUs, keyed on the MIDR part number. */
	switch (read_cpuid_part()) {
	case ARM_CPU_PART_ARM1136:
		ret = armv6_1136_pmu_init(pmu);
		break;
	case ARM_CPU_PART_ARM1156:
		ret = armv6_1156_pmu_init(pmu);
		break;
	case ARM_CPU_PART_ARM1176:
		ret = armv6_1176_pmu_init(pmu);
		break;
	case ARM_CPU_PART_ARM11MPCORE:
		ret = armv6mpcore_pmu_init(pmu);
		break;
	case ARM_CPU_PART_CORTEX_A8:
		ret = armv7_a8_pmu_init(pmu);
		break;
	case ARM_CPU_PART_CORTEX_A9:
		ret = armv7_a9_pmu_init(pmu);
		break;
	default:
		if (read_cpuid_implementor() == ARM_CPU_IMP_INTEL) {
			switch (xscale_cpu_arch_version()) {
			case ARM_CPU_XSCALE_ARCH_V1:
				ret = xscale1pmu_init(pmu);
				break;
			case ARM_CPU_XSCALE_ARCH_V2:
				ret = xscale2pmu_init(pmu);
				break;
			}
		}
		break;
	}

	put_cpu();
	return ret;
}
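/*
 * Bind the platform device: take the init function from the devicetree
 * match if there is one, otherwise probe the running CPU, then register
 * the PMU with the perf core.
 */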
static int cpu_pmu_device_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	const int (*init_fn)(struct arm_pmu *);
	struct device_node *node = pdev->dev.of_node;
	struct arm_pmu *pmu;
	int ret = -ENODEV;

	if (cpu_pmu) {
		pr_info("attempt to register multiple PMU devices!");
		return -ENOSPC;
	}

	pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
	if (!pmu) {
		pr_info("failed to allocate PMU device!");
		return -ENOMEM;
	}

	cpu_pmu = pmu;
	cpu_pmu->plat_device = pdev;

	if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
		init_fn = of_id->data;
		ret = init_fn(pmu);
	} else {
		ret = probe_current_pmu(pmu);
	}

	if (ret) {
		pr_info("failed to probe PMU!");
		goto out_free;
	}

	cpu_pmu_init(cpu_pmu);
	ret = armpmu_register(cpu_pmu, PERF_TYPE_RAW);

	if (!ret)
		return 0;

out_free:
	pr_info("failed to register PMU devices!");
	kfree(pmu);
	return ret;
}
static struct platform_driver cpu_pmu_driver = {
	.driver		= {
		.name	= "arm-pmu",
		.pm	= &armpmu_dev_pm_ops,
		.of_match_table = cpu_pmu_of_device_ids,
	},
	.probe		= cpu_pmu_device_probe,
	.id_table	= cpu_pmu_plat_device_ids,
};
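/*
 * Register the hotplug notifier before the driver, and drop it again if
 * driver registration fails.
 */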
static int __init register_pmu_driver(void)
{
	int err;

	err = register_cpu_notifier(&cpu_pmu_hotplug_notifier);
	if (err)
		return err;

	err = platform_driver_register(&cpu_pmu_driver);
	if (err)
		unregister_cpu_notifier(&cpu_pmu_hotplug_notifier);

	return err;
}
device_initcall(register_pmu_driver);