// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2016 Freescale Semiconductor, Inc.
 */
#include <linux/bitfield.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#define COUNTER_CNTL		0x0
#define COUNTER_READ		0x20

#define COUNTER_DPCR1		0x30

#define CNTL_OVER		0x1
#define CNTL_CLEAR		0x2
#define CNTL_EN			0x4
#define CNTL_EN_MASK		0xFFFFFFFB
#define CNTL_CLEAR_MASK		0xFFFFFFFD
#define CNTL_OVER_MASK		0xFFFFFFFE

#define CNTL_CSV_SHIFT		24
#define CNTL_CSV_MASK		(0xFF << CNTL_CSV_SHIFT)

#define EVENT_CYCLES_ID		0
#define EVENT_CYCLES_COUNTER	0
#define NUM_COUNTERS		4

#define AXI_MASKING_REVERT	0xffff0000	/* AXI_MASKING(MSB 16bits) + AXI_ID(LSB 16bits) */
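
/*
 * Register layout, as implied by the accessors below: each counter i has
 * a control register at COUNTER_CNTL + i * 4 (enable/clear/overflow bits
 * in the low bits, event select in the CSV field, bits 31:24) and a read
 * register at COUNTER_READ + i * 4. COUNTER_DPCR1 holds the AXI ID
 * filter value and, on enhanced-filter parts, the filtered byte counts.
 */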
#define to_ddr_pmu(p)		container_of(p, struct ddr_pmu, pmu)

#define DDR_PERF_DEV_NAME	"imx8_ddr"
#define DDR_CPUHP_CB_NAME	DDR_PERF_DEV_NAME "_perf_pmu"

static DEFINE_IDA(ddr_ida);

/* DDR Perf hardware feature */
#define DDR_CAP_AXI_ID_FILTER		0x1	/* support AXI ID filter */
#define DDR_CAP_AXI_ID_FILTER_ENHANCED	0x3	/* support enhanced AXI ID filter */
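
/*
 * Note: DDR_CAP_AXI_ID_FILTER_ENHANCED (0x3) deliberately includes the
 * DDR_CAP_AXI_ID_FILTER bit (0x1), so enhanced-filter parts also pass
 * every plain-filter capability check below.
 */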

struct fsl_ddr_devtype_data {
	unsigned int quirks;		/* quirks needed for different DDR Perf core */
	const char *identifier;		/* system PMU identifier for userspace */
};

static const struct fsl_ddr_devtype_data imx8_devtype_data;

static const struct fsl_ddr_devtype_data imx8m_devtype_data = {
	.quirks = DDR_CAP_AXI_ID_FILTER,
};

static const struct fsl_ddr_devtype_data imx8mq_devtype_data = {
	.quirks = DDR_CAP_AXI_ID_FILTER,
	.identifier = "i.MX8MQ",
};

static const struct fsl_ddr_devtype_data imx8mm_devtype_data = {
	.quirks = DDR_CAP_AXI_ID_FILTER,
	.identifier = "i.MX8MM",
};

static const struct fsl_ddr_devtype_data imx8mn_devtype_data = {
	.quirks = DDR_CAP_AXI_ID_FILTER,
	.identifier = "i.MX8MN",
};

static const struct fsl_ddr_devtype_data imx8mp_devtype_data = {
	.quirks = DDR_CAP_AXI_ID_FILTER_ENHANCED,
	.identifier = "i.MX8MP",
};

static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
	{ .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data},
	{ .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data},
	{ .compatible = "fsl,imx8mq-ddr-pmu", .data = &imx8mq_devtype_data},
	{ .compatible = "fsl,imx8mm-ddr-pmu", .data = &imx8mm_devtype_data},
	{ .compatible = "fsl,imx8mn-ddr-pmu", .data = &imx8mn_devtype_data},
	{ .compatible = "fsl,imx8mp-ddr-pmu", .data = &imx8mp_devtype_data},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids);
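
/*
 * Illustrative device tree node for this driver (a sketch only; the reg
 * and interrupt values are examples, the authoritative binding lives in
 * Documentation/devicetree/bindings/perf/fsl-imx-ddr.yaml):
 *
 *	ddr-pmu@3d800000 {
 *		compatible = "fsl,imx8m-ddr-pmu";
 *		reg = <0x3d800000 0x400000>;
 *		interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
 *	};
 */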

struct ddr_pmu {
	struct pmu pmu;
	void __iomem *base;
	unsigned int cpu;
	struct hlist_node node;
	struct device *dev;
	struct perf_event *events[NUM_COUNTERS];
	int active_events;
	enum cpuhp_state cpuhp_state;
	const struct fsl_ddr_devtype_data *devtype_data;
	int irq;
	int id;
};

static ssize_t ddr_perf_identifier_show(struct device *dev,
					struct device_attribute *attr,
					char *page)
{
	struct ddr_pmu *pmu = dev_get_drvdata(dev);

	return sysfs_emit(page, "%s\n", pmu->devtype_data->identifier);
}

static umode_t ddr_perf_identifier_attr_visible(struct kobject *kobj,
						struct attribute *attr,
						int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct ddr_pmu *pmu = dev_get_drvdata(dev);

	if (!pmu->devtype_data->identifier)
		return 0;
	return attr->mode;
}

static struct device_attribute ddr_perf_identifier_attr =
	__ATTR(identifier, 0444, ddr_perf_identifier_show, NULL);

static struct attribute *ddr_perf_identifier_attrs[] = {
	&ddr_perf_identifier_attr.attr,
	NULL,
};

static const struct attribute_group ddr_perf_identifier_attr_group = {
	.attrs = ddr_perf_identifier_attrs,
	.is_visible = ddr_perf_identifier_attr_visible,
};

enum ddr_perf_filter_capabilities {
	PERF_CAP_AXI_ID_FILTER = 0,
	PERF_CAP_AXI_ID_FILTER_ENHANCED,
	PERF_CAP_AXI_ID_FEAT_MAX,
};

static u32 ddr_perf_filter_cap_get(struct ddr_pmu *pmu, int cap)
{
	u32 quirks = pmu->devtype_data->quirks;

	switch (cap) {
	case PERF_CAP_AXI_ID_FILTER:
		return !!(quirks & DDR_CAP_AXI_ID_FILTER);
	case PERF_CAP_AXI_ID_FILTER_ENHANCED:
		quirks &= DDR_CAP_AXI_ID_FILTER_ENHANCED;
		return quirks == DDR_CAP_AXI_ID_FILTER_ENHANCED;
	default:
		WARN(1, "unknown filter cap %d\n", cap);
	}

	return 0;
}

static ssize_t ddr_perf_filter_cap_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct ddr_pmu *pmu = dev_get_drvdata(dev);
	struct dev_ext_attribute *ea =
		container_of(attr, struct dev_ext_attribute, attr);
	int cap = (long)ea->var;

	return sysfs_emit(buf, "%u\n", ddr_perf_filter_cap_get(pmu, cap));
}

#define PERF_EXT_ATTR_ENTRY(_name, _func, _var)			\
	(&((struct dev_ext_attribute) {					\
		__ATTR(_name, 0444, _func, NULL), (void *)_var		\
	}).attr.attr)

#define PERF_FILTER_EXT_ATTR_ENTRY(_name, _var)			\
	PERF_EXT_ATTR_ENTRY(_name, ddr_perf_filter_cap_show, _var)

static struct attribute *ddr_perf_filter_cap_attr[] = {
	PERF_FILTER_EXT_ATTR_ENTRY(filter, PERF_CAP_AXI_ID_FILTER),
	PERF_FILTER_EXT_ATTR_ENTRY(enhanced_filter, PERF_CAP_AXI_ID_FILTER_ENHANCED),
	NULL,
};

static const struct attribute_group ddr_perf_filter_cap_attr_group = {
	.name = "caps",
	.attrs = ddr_perf_filter_cap_attr,
};
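
/*
 * perf userspace reads the "cpumask" attribute below to learn which CPU
 * it must open events on for this uncore PMU; ddr_perf_event_init()
 * rejects per-task use and forces event->cpu to pmu->cpu accordingly.
 */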
static ssize_t ddr_perf_cpumask_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct ddr_pmu *pmu = dev_get_drvdata(dev);

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
}

static struct device_attribute ddr_perf_cpumask_attr =
	__ATTR(cpumask, 0444, ddr_perf_cpumask_show, NULL);

static struct attribute *ddr_perf_cpumask_attrs[] = {
	&ddr_perf_cpumask_attr.attr,
	NULL,
};

static const struct attribute_group ddr_perf_cpumask_attr_group = {
	.attrs = ddr_perf_cpumask_attrs,
};

static ssize_t
ddr_pmu_event_show(struct device *dev, struct device_attribute *attr,
		   char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
	return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}

#define IMX8_DDR_PMU_EVENT_ATTR(_name, _id)				\
	(&((struct perf_pmu_events_attr[]) {				\
		{ .attr = __ATTR(_name, 0444, ddr_pmu_event_show, NULL),\
		  .id = _id, }						\
	})[0].attr.attr)

static struct attribute *ddr_perf_events_attrs[] = {
	IMX8_DDR_PMU_EVENT_ATTR(cycles, EVENT_CYCLES_ID),
	IMX8_DDR_PMU_EVENT_ATTR(selfresh, 0x01),
	IMX8_DDR_PMU_EVENT_ATTR(read-accesses, 0x04),
	IMX8_DDR_PMU_EVENT_ATTR(write-accesses, 0x05),
	IMX8_DDR_PMU_EVENT_ATTR(read-queue-depth, 0x08),
	IMX8_DDR_PMU_EVENT_ATTR(write-queue-depth, 0x09),
	IMX8_DDR_PMU_EVENT_ATTR(lp-read-credit-cnt, 0x10),
	IMX8_DDR_PMU_EVENT_ATTR(hp-read-credit-cnt, 0x11),
	IMX8_DDR_PMU_EVENT_ATTR(write-credit-cnt, 0x12),
	IMX8_DDR_PMU_EVENT_ATTR(read-command, 0x20),
	IMX8_DDR_PMU_EVENT_ATTR(write-command, 0x21),
	IMX8_DDR_PMU_EVENT_ATTR(read-modify-write-command, 0x22),
	IMX8_DDR_PMU_EVENT_ATTR(hp-read, 0x23),
	IMX8_DDR_PMU_EVENT_ATTR(hp-req-nocredit, 0x24),
	IMX8_DDR_PMU_EVENT_ATTR(hp-xact-credit, 0x25),
	IMX8_DDR_PMU_EVENT_ATTR(lp-req-nocredit, 0x26),
	IMX8_DDR_PMU_EVENT_ATTR(lp-xact-credit, 0x27),
	IMX8_DDR_PMU_EVENT_ATTR(wr-xact-credit, 0x29),
	IMX8_DDR_PMU_EVENT_ATTR(read-cycles, 0x2a),
	IMX8_DDR_PMU_EVENT_ATTR(write-cycles, 0x2b),
	IMX8_DDR_PMU_EVENT_ATTR(read-write-transition, 0x30),
	IMX8_DDR_PMU_EVENT_ATTR(precharge, 0x31),
	IMX8_DDR_PMU_EVENT_ATTR(activate, 0x32),
	IMX8_DDR_PMU_EVENT_ATTR(load-mode, 0x33),
	IMX8_DDR_PMU_EVENT_ATTR(perf-mwr, 0x34),
	IMX8_DDR_PMU_EVENT_ATTR(read, 0x35),
	IMX8_DDR_PMU_EVENT_ATTR(read-activate, 0x36),
	IMX8_DDR_PMU_EVENT_ATTR(refresh, 0x37),
	IMX8_DDR_PMU_EVENT_ATTR(write, 0x38),
	IMX8_DDR_PMU_EVENT_ATTR(raw-hazard, 0x39),
	IMX8_DDR_PMU_EVENT_ATTR(axid-read, 0x41),
	IMX8_DDR_PMU_EVENT_ATTR(axid-write, 0x42),
	NULL,
};

static const struct attribute_group ddr_perf_events_attr_group = {
	.name = "events",
	.attrs = ddr_perf_events_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-7");
PMU_FORMAT_ATTR(axi_id, "config1:0-15");
PMU_FORMAT_ATTR(axi_mask, "config1:16-31");

static struct attribute *ddr_perf_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_axi_id.attr,
	&format_attr_axi_mask.attr,
	NULL,
};

static const struct attribute_group ddr_perf_format_attr_group = {
	.name = "format",
	.attrs = ddr_perf_format_attrs,
};
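
/*
 * With the "event", "axi_id" and "axi_mask" format fields above, a
 * counter can be requested from userspace roughly like this (a sketch,
 * assuming this instance registered as imx8_ddr0):
 *
 *	perf stat -a -e imx8_ddr0/read-cycles/ sleep 1
 *	perf stat -a -e imx8_ddr0/axid-read,axi_id=0x10,axi_mask=0xff/ sleep 1
 *
 * axi_id/axi_mask only take effect on parts with an AXI ID filter.
 */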

static const struct attribute_group *attr_groups[] = {
	&ddr_perf_events_attr_group,
	&ddr_perf_format_attr_group,
	&ddr_perf_cpumask_attr_group,
	&ddr_perf_filter_cap_attr_group,
	&ddr_perf_identifier_attr_group,
	NULL,
};
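
/*
 * AXI ID filtering: only events 0x41 (axid-read) and 0x42 (axid-write)
 * are filtered, and the filter value travels in config1 (axi_id in the
 * low 16 bits, axi_mask in the high 16, per the format attributes
 * above). All counters share the single DPCR1 filter register, so
 * concurrently scheduled filtered events must agree on that value,
 * hence the compatibility checks below.
 */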
static bool ddr_perf_is_filtered(struct perf_event *event)
{
	return event->attr.config == 0x41 || event->attr.config == 0x42;
}

static u32 ddr_perf_filter_val(struct perf_event *event)
{
	return event->attr.config1;
}

static bool ddr_perf_filters_compatible(struct perf_event *a,
					struct perf_event *b)
{
	if (!ddr_perf_is_filtered(a))
		return true;
	if (!ddr_perf_is_filtered(b))
		return true;
	return ddr_perf_filter_val(a) == ddr_perf_filter_val(b);
}

static bool ddr_perf_is_enhanced_filtered(struct perf_event *event)
{
	unsigned int filt;
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);

	filt = pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED;
	return (filt == DDR_CAP_AXI_ID_FILTER_ENHANCED) &&
		ddr_perf_is_filtered(event);
}

static int ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event)
{
	int i;

	/*
	 * Always map the cycle event to counter 0.
	 * The cycles counter is dedicated to the cycle event and
	 * can't be used for other events.
	 */
	if (event == EVENT_CYCLES_ID) {
		if (pmu->events[EVENT_CYCLES_COUNTER] == NULL)
			return EVENT_CYCLES_COUNTER;
		else
			return -ENOENT;
	}

	for (i = 1; i < NUM_COUNTERS; i++) {
		if (pmu->events[i] == NULL)
			return i;
	}

	return -ENOENT;
}

static void ddr_perf_free_counter(struct ddr_pmu *pmu, int counter)
{
	pmu->events[counter] = NULL;
}

static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter)
{
	struct perf_event *event = pmu->events[counter];
	void __iomem *base = pmu->base;

	/*
	 * Return bytes instead of bursts from the DDR transaction for
	 * axid-read and axid-write events if the PMU core supports the
	 * enhanced filter.
	 */
	base += ddr_perf_is_enhanced_filtered(event) ? COUNTER_DPCR1 :
						       COUNTER_READ;
	return readl_relaxed(base + counter * 4);
}

static int ddr_perf_event_init(struct perf_event *event)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct perf_event *sibling;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	if (event->cpu < 0) {
		dev_warn(pmu->dev, "Can't provide per-task data!\n");
		return -EOPNOTSUPP;
	}

	/*
	 * We must NOT create groups containing mixed PMUs, although software
	 * events are acceptable (for example to create a CCN group
	 * periodically read when a hrtimer aka cpu-clock leader triggers).
	 */
	if (event->group_leader->pmu != event->pmu &&
	    !is_software_event(event->group_leader))
		return -EINVAL;

	if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
		if (!ddr_perf_filters_compatible(event, event->group_leader))
			return -EINVAL;
		for_each_sibling_event(sibling, event->group_leader) {
			if (!ddr_perf_filters_compatible(event, sibling))
				return -EINVAL;
		}
	}

	for_each_sibling_event(sibling, event->group_leader) {
		if (sibling->pmu != event->pmu &&
		    !is_software_event(sibling))
			return -EINVAL;
	}

	event->cpu = pmu->cpu;
	hwc->idx = -1;

	return 0;
}

static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
				    int counter, bool enable)
{
	u8 reg = counter * 4 + COUNTER_CNTL;
	int val;

	if (enable) {
		/*
		 * The cycle counter is special: it must have 0 and then 1
		 * written to its CLEAR bit to be cleared. Other counters
		 * only need 0 written to the CLEAR bit; hardware sets it
		 * back to 1. The enable flow below is harmless for all
		 * counters.
		 */
		writel(0, pmu->base + reg);
		val = CNTL_EN | CNTL_CLEAR;
		val |= FIELD_PREP(CNTL_CSV_MASK, config);
		writel(val, pmu->base + reg);
	} else {
		/* Disable counter */
		val = readl_relaxed(pmu->base + reg) & CNTL_EN_MASK;
		writel(val, pmu->base + reg);
	}
}

static bool ddr_perf_counter_overflow(struct ddr_pmu *pmu, int counter)
{
	int val;

	val = readl_relaxed(pmu->base + counter * 4 + COUNTER_CNTL);

	return val & CNTL_OVER;
}

static void ddr_perf_counter_clear(struct ddr_pmu *pmu, int counter)
{
	u8 reg = counter * 4 + COUNTER_CNTL;
	int val;

	val = readl_relaxed(pmu->base + reg);
	val &= ~CNTL_CLEAR;
	writel(val, pmu->base + reg);

	val |= CNTL_CLEAR;
	writel(val, pmu->base + reg);
}

static void ddr_perf_event_update(struct perf_event *event)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 new_raw_count;
	int counter = hwc->idx;
	int ret;

	new_raw_count = ddr_perf_read_counter(pmu, counter);
	local64_add(new_raw_count, &event->count);

	/*
	 * For legacy SoCs: the event counter continues counting on
	 * overflow, so there is no need to clear it.
	 * For new SoCs: the event counter stops counting on overflow
	 * and must be cleared to count again.
	 */
	if (counter != EVENT_CYCLES_COUNTER) {
		ret = ddr_perf_counter_overflow(pmu, counter);
		if (ret)
			dev_warn_ratelimited(pmu->dev, "events lost due to counter overflow (config 0x%llx)\n",
					     event->attr.config);
	}

	/* clear counter every time for both cycle counter and event counter */
	ddr_perf_counter_clear(pmu, counter);
}

static void ddr_perf_event_start(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	local64_set(&hwc->prev_count, 0);

	ddr_perf_counter_enable(pmu, event->attr.config, counter, true);

	hwc->state = 0;
}

static int ddr_perf_event_add(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter;
	int cfg = event->attr.config;
	int cfg1 = event->attr.config1;

	if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
		int i;

		for (i = 1; i < NUM_COUNTERS; i++) {
			if (pmu->events[i] &&
			    !ddr_perf_filters_compatible(event, pmu->events[i]))
				return -EINVAL;
		}

		if (ddr_perf_is_filtered(event)) {
			/* revert axi id masking(axi_mask) value */
			cfg1 ^= AXI_MASKING_REVERT;
			writel(cfg1, pmu->base + COUNTER_DPCR1);
		}
	}

	counter = ddr_perf_alloc_counter(pmu, cfg);
	if (counter < 0) {
		dev_dbg(pmu->dev, "There are not enough counters\n");
		return -EOPNOTSUPP;
	}

	pmu->events[counter] = event;
	pmu->active_events++;
	hwc->idx = counter;

	hwc->state |= PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		ddr_perf_event_start(event, flags);

	return 0;
}

static void ddr_perf_event_stop(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	ddr_perf_counter_enable(pmu, event->attr.config, counter, false);
	ddr_perf_event_update(event);

	hwc->state |= PERF_HES_STOPPED;
}

static void ddr_perf_event_del(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	ddr_perf_event_stop(event, PERF_EF_UPDATE);

	ddr_perf_free_counter(pmu, counter);
	pmu->active_events--;
	hwc->idx = -1;
}
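
/*
 * The cycle counter doubles as the PMU's master switch: stopping it
 * stops every counter (see the IRQ handler below). When no user is
 * counting cycles, pmu_enable/pmu_disable drive counter 0 themselves so
 * that the remaining counters can run.
 */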
static void ddr_perf_pmu_enable(struct pmu *pmu)
{
	struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);

	/* enable the cycle counter if the cycles event itself is not active */
	if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
		ddr_perf_counter_enable(ddr_pmu,
					EVENT_CYCLES_ID,
					EVENT_CYCLES_COUNTER,
					true);
}

static void ddr_perf_pmu_disable(struct pmu *pmu)
{
	struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);

	if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
		ddr_perf_counter_enable(ddr_pmu,
					EVENT_CYCLES_ID,
					EVENT_CYCLES_COUNTER,
					false);
}

static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
			 struct device *dev)
{
	*pmu = (struct ddr_pmu) {
		.pmu = (struct pmu) {
			.module	      = THIS_MODULE,
			.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
			.task_ctx_nr = perf_invalid_context,
			.attr_groups = attr_groups,
			.event_init  = ddr_perf_event_init,
			.add	     = ddr_perf_event_add,
			.del	     = ddr_perf_event_del,
			.start	     = ddr_perf_event_start,
			.stop	     = ddr_perf_event_stop,
			.read	     = ddr_perf_event_update,
			.pmu_enable  = ddr_perf_pmu_enable,
			.pmu_disable = ddr_perf_pmu_disable,
		},
		.base = base,
		.dev = dev,
	};

	pmu->id = ida_simple_get(&ddr_ida, 0, 0, GFP_KERNEL);
	return pmu->id;
}

static irqreturn_t ddr_perf_irq_handler(int irq, void *p)
{
	int i;
	struct ddr_pmu *pmu = (struct ddr_pmu *) p;
	struct perf_event *event;

	/* all counters will stop if the cycle counter is disabled */
	ddr_perf_counter_enable(pmu,
				EVENT_CYCLES_ID,
				EVENT_CYCLES_COUNTER,
				false);

	/*
	 * When the cycle counter overflows, all counters are stopped,
	 * and an IRQ is raised. If any other counter overflows, it
	 * continues counting, and no IRQ is raised. But on new SoCs,
	 * such as i.MX8MP, the event counters stop when they overflow,
	 * so we use the cycle counter to bound event counter overflow.
	 *
	 * Cycles occur at least 4 times as often as other events, so we
	 * can update all events on a cycle counter overflow and not
	 * lose events.
	 */
	for (i = 0; i < NUM_COUNTERS; i++) {
		if (!pmu->events[i])
			continue;

		event = pmu->events[i];

		ddr_perf_event_update(event);
	}

	ddr_perf_counter_enable(pmu,
				EVENT_CYCLES_ID,
				EVENT_CYCLES_COUNTER,
				true);

	return IRQ_HANDLED;
}

static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct ddr_pmu *pmu = hlist_entry_safe(node, struct ddr_pmu, node);
	int target;

	if (cpu != pmu->cpu)
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&pmu->pmu, cpu, target);
	pmu->cpu = target;

	WARN_ON(irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu)));

	return 0;
}

static int ddr_perf_probe(struct platform_device *pdev)
{
	struct ddr_pmu *pmu;
	struct device_node *np;
	void __iomem *base;
	char *name;
	int num;
	int ret;
	int irq;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	np = pdev->dev.of_node;

	pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		return -ENOMEM;

	num = ddr_perf_init(pmu, base, &pdev->dev);

	platform_set_drvdata(pdev, pmu);

	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d",
			      num);
	if (!name) {
		ret = -ENOMEM;
		goto cpuhp_state_err;
	}

	pmu->devtype_data = of_device_get_match_data(&pdev->dev);

	pmu->cpu = raw_smp_processor_id();
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      DDR_CPUHP_CB_NAME,
				      NULL,
				      ddr_perf_offline_cpu);
	if (ret < 0) {
		dev_err(&pdev->dev, "cpuhp_setup_state_multi failed\n");
		goto cpuhp_state_err;
	}

	pmu->cpuhp_state = ret;

	/* Register the pmu instance for cpu hotplug */
	ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
	if (ret) {
		dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
		goto cpuhp_instance_err;
	}

	/* Request irq */
	irq = of_irq_get(np, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "Failed to get irq: %d", irq);
		ret = irq;
		goto ddr_perf_err;
	}

	ret = devm_request_irq(&pdev->dev, irq,
			       ddr_perf_irq_handler,
			       IRQF_NOBALANCING | IRQF_NO_THREAD,
			       DDR_CPUHP_CB_NAME,
			       pmu);
	if (ret < 0) {
		dev_err(&pdev->dev, "Request irq failed: %d", ret);
		goto ddr_perf_err;
	}

	pmu->irq = irq;
	ret = irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu));
	if (ret) {
		dev_err(pmu->dev, "Failed to set interrupt affinity!\n");
		goto ddr_perf_err;
	}

	ret = perf_pmu_register(&pmu->pmu, name, -1);
	if (ret)
		goto ddr_perf_err;

	return 0;

ddr_perf_err:
	cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
cpuhp_instance_err:
	cpuhp_remove_multi_state(pmu->cpuhp_state);
cpuhp_state_err:
	ida_simple_remove(&ddr_ida, pmu->id);
	dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret);
	return ret;
}

static int ddr_perf_remove(struct platform_device *pdev)
{
	struct ddr_pmu *pmu = platform_get_drvdata(pdev);

	cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
	cpuhp_remove_multi_state(pmu->cpuhp_state);
	irq_set_affinity_hint(pmu->irq, NULL);

	perf_pmu_unregister(&pmu->pmu);

	ida_simple_remove(&ddr_ida, pmu->id);
	return 0;
}

static struct platform_driver imx_ddr_pmu_driver = {
	.driver         = {
		.name   = "imx-ddr-pmu",
		.of_match_table = imx_ddr_pmu_dt_ids,
		.suppress_bind_attrs = true,
	},
	.probe          = ddr_perf_probe,
	.remove         = ddr_perf_remove,
};

module_platform_driver(imx_ddr_pmu_driver);
MODULE_LICENSE("GPL v2");