// SPDX-License-Identifier: GPL-2.0
/*
 * CAVIUM THUNDERX2 SoC PMU UNCORE
 * Copyright (C) 2018 Cavium Inc.
 * Author: Ganapatrao Kulkarni <gkulkarni@cavium.com>
 */

#include <linux/acpi.h>
#include <linux/cpuhotplug.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>

/* Each ThunderX2 (TX2) socket has an L3C and a DMC UNCORE PMU device.
 * Each UNCORE PMU device consists of 4 independent programmable counters.
 * Counters are 32 bit and do not support an overflow interrupt;
 * they must be sampled before overflow (i.e. every 2 seconds).
 */

#define TX2_PMU_MAX_COUNTERS		4
#define TX2_PMU_DMC_CHANNELS		8
#define TX2_PMU_L3_TILES		16

#define TX2_PMU_HRTIMER_INTERVAL	(2 * NSEC_PER_SEC)
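
/*
 * Illustrative arithmetic (added note, not from the original source): a
 * 32-bit counter wraps after 2^32 (~4.29e9) events. Sampling every
 * 2 * NSEC_PER_SEC = 2e9 ns therefore sees at most one wrap per period
 * as long as the event rate stays below ~2^32 / 2s ~= 2.1e9 events/s,
 * which the rollover handling in tx2_uncore_event_update() relies on.
 */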
#define GET_EVENTID(ev)			(((ev)->hw.config) & 0x1f)
#define GET_COUNTERID(ev)		(((ev)->hw.idx) & 0x3)

/* 1 byte per counter (4 counters).
 * The event id is encoded in bits [5:1] of each counter's byte.
 */
#define DMC_EVENT_CFG(idx, val)		((val) << (((idx) * 8) + 1))
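
/*
 * Worked example (illustrative note, not from the original source):
 * programming event DMC_EVENT_READ_TXNS (0xF) on counter idx 2 gives
 * DMC_EVENT_CFG(2, 0xF) == 0xF << ((2 * 8) + 1) == 0xF << 17 == 0x1e0000,
 * i.e. bits [5:1] of byte 2 in the DMC counter control register.
 */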

#define L3C_COUNTER_CTL			0xA8
#define L3C_COUNTER_DATA		0xAC
#define DMC_COUNTER_CTL			0x234
#define DMC_COUNTER_DATA		0x240

/* L3C event IDs */
#define L3_EVENT_READ_REQ		0xD
#define L3_EVENT_WRITEBACK_REQ		0xE
#define L3_EVENT_INV_N_WRITE_REQ	0xF
#define L3_EVENT_INV_REQ		0x10
#define L3_EVENT_EVICT_REQ		0x13
#define L3_EVENT_INV_N_WRITE_HIT	0x14
#define L3_EVENT_INV_HIT		0x15
#define L3_EVENT_READ_HIT		0x17
#define L3_EVENT_MAX			0x18

/* DMC event IDs */
#define DMC_EVENT_COUNT_CYCLES		0x1
#define DMC_EVENT_WRITE_TXNS		0xB
#define DMC_EVENT_DATA_TRANSFERS	0xD
#define DMC_EVENT_READ_TXNS		0xF
#define DMC_EVENT_MAX			0x10

enum tx2_uncore_type {
	PMU_TYPE_L3C,
	PMU_TYPE_DMC,
	PMU_TYPE_INVALID,
};

/*
 * The PMU on each socket has 2 uncore devices (DMC and L3C);
 * each device has 4 counters.
 */
struct tx2_uncore_pmu {
	struct hlist_node hpnode;
	struct list_head entry;
	struct pmu pmu;
	char *name;
	int node;
	int cpu;
	u32 max_counters;
	u32 prorate_factor;
	u32 max_events;
	u64 hrtimer_interval;
	void __iomem *base;
	DECLARE_BITMAP(active_counters, TX2_PMU_MAX_COUNTERS);
	struct perf_event *events[TX2_PMU_MAX_COUNTERS];
	struct device *dev;
	struct hrtimer hrtimer;
	const struct attribute_group **attr_groups;
	enum tx2_uncore_type type;
	void (*init_cntr_base)(struct perf_event *event,
			struct tx2_uncore_pmu *tx2_pmu);
	void (*stop_event)(struct perf_event *event);
	void (*start_event)(struct perf_event *event, int flags);
};

static LIST_HEAD(tx2_pmus);

static inline struct tx2_uncore_pmu *pmu_to_tx2_pmu(struct pmu *pmu)
{
	return container_of(pmu, struct tx2_uncore_pmu, pmu);
}

PMU_FORMAT_ATTR(event, "config:0-4");

static struct attribute *l3c_pmu_format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute *dmc_pmu_format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};

static const struct attribute_group l3c_pmu_format_attr_group = {
	.name = "format",
	.attrs = l3c_pmu_format_attrs,
};

static const struct attribute_group dmc_pmu_format_attr_group = {
	.name = "format",
	.attrs = dmc_pmu_format_attrs,
};

/*
 * sysfs event attributes
 */
static ssize_t tx2_pmu_event_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr;

	eattr = container_of(attr, struct dev_ext_attribute, attr);
	return sprintf(buf, "event=0x%lx\n", (unsigned long) eattr->var);
}
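
/*
 * Example output (illustrative, assuming an L3C PMU named uncore_l3c_0):
 * reading /sys/bus/event_source/devices/uncore_l3c_0/events/read_request
 * returns "event=0xd", matching L3_EVENT_READ_REQ above.
 */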

#define TX2_EVENT_ATTR(name, config) \
	PMU_EVENT_ATTR(name, tx2_pmu_event_attr_##name, \
			config, tx2_pmu_event_show)

TX2_EVENT_ATTR(read_request, L3_EVENT_READ_REQ);
TX2_EVENT_ATTR(writeback_request, L3_EVENT_WRITEBACK_REQ);
TX2_EVENT_ATTR(inv_nwrite_request, L3_EVENT_INV_N_WRITE_REQ);
TX2_EVENT_ATTR(inv_request, L3_EVENT_INV_REQ);
TX2_EVENT_ATTR(evict_request, L3_EVENT_EVICT_REQ);
TX2_EVENT_ATTR(inv_nwrite_hit, L3_EVENT_INV_N_WRITE_HIT);
TX2_EVENT_ATTR(inv_hit, L3_EVENT_INV_HIT);
TX2_EVENT_ATTR(read_hit, L3_EVENT_READ_HIT);

static struct attribute *l3c_pmu_events_attrs[] = {
	&tx2_pmu_event_attr_read_request.attr.attr,
	&tx2_pmu_event_attr_writeback_request.attr.attr,
	&tx2_pmu_event_attr_inv_nwrite_request.attr.attr,
	&tx2_pmu_event_attr_inv_request.attr.attr,
	&tx2_pmu_event_attr_evict_request.attr.attr,
	&tx2_pmu_event_attr_inv_nwrite_hit.attr.attr,
	&tx2_pmu_event_attr_inv_hit.attr.attr,
	&tx2_pmu_event_attr_read_hit.attr.attr,
	NULL,
};

TX2_EVENT_ATTR(cnt_cycles, DMC_EVENT_COUNT_CYCLES);
TX2_EVENT_ATTR(write_txns, DMC_EVENT_WRITE_TXNS);
TX2_EVENT_ATTR(data_transfers, DMC_EVENT_DATA_TRANSFERS);
TX2_EVENT_ATTR(read_txns, DMC_EVENT_READ_TXNS);

static struct attribute *dmc_pmu_events_attrs[] = {
	&tx2_pmu_event_attr_cnt_cycles.attr.attr,
	&tx2_pmu_event_attr_write_txns.attr.attr,
	&tx2_pmu_event_attr_data_transfers.attr.attr,
	&tx2_pmu_event_attr_read_txns.attr.attr,
	NULL,
};

static const struct attribute_group l3c_pmu_events_attr_group = {
	.name = "events",
	.attrs = l3c_pmu_events_attrs,
};

static const struct attribute_group dmc_pmu_events_attr_group = {
	.name = "events",
	.attrs = dmc_pmu_events_attrs,
};

/*
 * sysfs cpumask attributes
 */
static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct tx2_uncore_pmu *tx2_pmu;

	tx2_pmu = pmu_to_tx2_pmu(dev_get_drvdata(dev));
	return cpumap_print_to_pagebuf(true, buf, cpumask_of(tx2_pmu->cpu));
}
static DEVICE_ATTR_RO(cpumask);

static struct attribute *tx2_pmu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group pmu_cpumask_attr_group = {
	.attrs = tx2_pmu_cpumask_attrs,
};

/*
 * Per PMU device attribute groups
 */
static const struct attribute_group *l3c_pmu_attr_groups[] = {
	&l3c_pmu_format_attr_group,
	&pmu_cpumask_attr_group,
	&l3c_pmu_events_attr_group,
	NULL
};

static const struct attribute_group *dmc_pmu_attr_groups[] = {
	&dmc_pmu_format_attr_group,
	&pmu_cpumask_attr_group,
	&dmc_pmu_events_attr_group,
	NULL
};
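
/*
 * Usage sketch (illustrative, not part of the driver): with the groups
 * above, userspace can count uncore events system-wide, e.g.
 *
 *	perf stat -a -e uncore_l3c_0/read_request/ \
 *		-e uncore_dmc_0/read_txns/ -- sleep 1
 *
 * The cpumask attribute tells perf which single CPU per socket the
 * events must be opened on.
 */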

static inline u32 reg_readl(unsigned long addr)
{
	return readl((void __iomem *)addr);
}

static inline void reg_writel(u32 val, unsigned long addr)
{
	writel(val, (void __iomem *)addr);
}

static int alloc_counter(struct tx2_uncore_pmu *tx2_pmu)
{
	int counter;

	counter = find_first_zero_bit(tx2_pmu->active_counters,
				tx2_pmu->max_counters);
	if (counter == tx2_pmu->max_counters)
		return -ENOSPC;

	set_bit(counter, tx2_pmu->active_counters);
	return counter;
}

static inline void free_counter(struct tx2_uncore_pmu *tx2_pmu, int counter)
{
	clear_bit(counter, tx2_pmu->active_counters);
}

static void init_cntr_base_l3c(struct perf_event *event,
		struct tx2_uncore_pmu *tx2_pmu)
{
	struct hw_perf_event *hwc = &event->hw;

	/* counter ctrl/data registers are spaced 8 bytes apart per counter */
	hwc->config_base = (unsigned long)tx2_pmu->base
		+ L3C_COUNTER_CTL + (8 * GET_COUNTERID(event));
	hwc->event_base = (unsigned long)tx2_pmu->base
		+ L3C_COUNTER_DATA + (8 * GET_COUNTERID(event));
}
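
/*
 * Worked example (illustrative): for counter id 1, the control register
 * lands at base + 0xA8 + 8 * 1 = base + 0xB0 and the data register at
 * base + 0xAC + 8 * 1 = base + 0xB4.
 */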

static void init_cntr_base_dmc(struct perf_event *event,
		struct tx2_uncore_pmu *tx2_pmu)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->config_base = (unsigned long)tx2_pmu->base
		+ DMC_COUNTER_CTL;
	/* counter data registers are spaced 0xc bytes apart per counter */
	hwc->event_base = (unsigned long)tx2_pmu->base
		+ DMC_COUNTER_DATA + (0xc * GET_COUNTERID(event));
}
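
/*
 * Worked example (illustrative): all DMC counters share one control
 * register at base + 0x234, while counter id 1's data register lands at
 * base + 0x240 + 0xc * 1 = base + 0x24c.
 */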

static void uncore_start_event_l3c(struct perf_event *event, int flags)
{
	u32 val;
	struct hw_perf_event *hwc = &event->hw;

	/* event id is encoded in bits [07:03] */
	val = GET_EVENTID(event) << 3;
	reg_writel(val, hwc->config_base);
	local64_set(&hwc->prev_count, 0);
	reg_writel(0, hwc->event_base);
}
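
/*
 * Worked example (illustrative): starting L3_EVENT_READ_REQ (0xD)
 * writes 0xD << 3 == 0x68 to the counter's control register; the data
 * register is zeroed so the next sample counts from zero.
 */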

static inline void uncore_stop_event_l3c(struct perf_event *event)
{
	reg_writel(0, event->hw.config_base);
}

static void uncore_start_event_dmc(struct perf_event *event, int flags)
{
	u32 val;
	struct hw_perf_event *hwc = &event->hw;
	int idx = GET_COUNTERID(event);
	int event_id = GET_EVENTID(event);

	/* enable and start the counter:
	 * 8 bits per counter; bits [05:01] of a counter's byte select
	 * the event type.
	 */
	val = reg_readl(hwc->config_base);
	val &= ~DMC_EVENT_CFG(idx, 0x1f);
	val |= DMC_EVENT_CFG(idx, event_id);
	reg_writel(val, hwc->config_base);
	local64_set(&hwc->prev_count, 0);
	reg_writel(0, hwc->event_base);
}
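
/*
 * Worked example (illustrative): the read-modify-write above only
 * touches this counter's byte. For idx 1 and DMC_EVENT_WRITE_TXNS (0xB),
 * the mask ~DMC_EVENT_CFG(1, 0x1f) clears bits [13:9] and
 * DMC_EVENT_CFG(1, 0xB) == 0xB << 9 == 0x1600 sets the new event id,
 * leaving the other three counters' bytes untouched.
 */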

static void uncore_stop_event_dmc(struct perf_event *event)
{
	u32 val;
	struct hw_perf_event *hwc = &event->hw;
	int idx = GET_COUNTERID(event);

	/* clear the event type (bits [05:01]) to stop the counter */
	val = reg_readl(hwc->config_base);
	val &= ~DMC_EVENT_CFG(idx, 0x1f);
	reg_writel(val, hwc->config_base);
}

static void tx2_uncore_event_update(struct perf_event *event)
{
	s64 prev, delta, new = 0;
	struct hw_perf_event *hwc = &event->hw;
	struct tx2_uncore_pmu *tx2_pmu;
	enum tx2_uncore_type type;
	u32 prorate_factor;

	tx2_pmu = pmu_to_tx2_pmu(event->pmu);
	type = tx2_pmu->type;
	prorate_factor = tx2_pmu->prorate_factor;

	new = reg_readl(hwc->event_base);
	prev = local64_xchg(&hwc->prev_count, new);

	/* handle rollover of the 32-bit counter */
	delta = (u32)(((1UL << 32) - prev) + new);

	/* DMC event data_transfers granularity is 16 bytes; convert it
	 * to 64-byte units.
	 */
	if (type == PMU_TYPE_DMC &&
			GET_EVENTID(event) == DMC_EVENT_DATA_TRANSFERS)
		delta = delta/4;

	/* L3C and DMC have 16 and 8 interleaved channels respectively.
	 * The sampled value is for channel 0; multiply it by the
	 * prorate_factor to get the count for the whole device.
	 */
	local64_add(delta * prorate_factor, &event->count);
}
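
/*
 * Worked example (illustrative): if prev == 0xfffffff0 and the counter
 * wrapped to new == 0x10, then (u32)(((1UL << 32) - 0xfffffff0) + 0x10)
 * == 0x20, the 32 events that actually occurred. For an L3C event the
 * value is then scaled by prorate_factor == 16 tiles.
 */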

static enum tx2_uncore_type get_tx2_pmu_type(struct acpi_device *adev)
{
	int i = 0;
	struct acpi_tx2_pmu_device {
		__u8 id[ACPI_ID_LEN];
		enum tx2_uncore_type type;
	} devices[] = {
		{"CAV901D", PMU_TYPE_L3C},
		{"CAV901F", PMU_TYPE_DMC},
		{"", PMU_TYPE_INVALID}
	};

	while (devices[i].type != PMU_TYPE_INVALID) {
		if (!strcmp(acpi_device_hid(adev), devices[i].id))
			break;
		i++;
	}

	return devices[i].type;
}

static bool tx2_uncore_validate_event(struct pmu *pmu,
				struct perf_event *event, int *counters)
{
	if (is_software_event(event))
		return true;
	/* Reject groups spanning multiple HW PMUs. */
	if (event->pmu != pmu)
		return false;

	*counters = *counters + 1;
	return true;
}

/*
 * Make sure the group of events can be scheduled at once
 * on the PMU.
 */
static bool tx2_uncore_validate_event_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	int counters = 0;

	if (event->group_leader == event)
		return true;

	if (!tx2_uncore_validate_event(event->pmu, leader, &counters))
		return false;

	for_each_sibling_event(sibling, leader) {
		if (!tx2_uncore_validate_event(event->pmu, sibling, &counters))
			return false;
	}

	if (!tx2_uncore_validate_event(event->pmu, event, &counters))
		return false;

	/*
	 * If the group requires more counters than the HW has,
	 * it cannot ever be scheduled.
	 */
	return counters <= TX2_PMU_MAX_COUNTERS;
}
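
/*
 * Example (illustrative): a group of five hardware events on this PMU
 * yields counters == 5 > TX2_PMU_MAX_COUNTERS, so the group is rejected
 * at event_init time rather than failing to schedule later.
 */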

static int tx2_uncore_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct tx2_uncore_pmu *tx2_pmu;

	/* Test the event attr type for PMU enumeration */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * SOC PMU counters are shared across all cores.
	 * Therefore, it does not support per-process mode.
	 * Also, it does not support event sampling mode.
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	/* We have no filtering of any kind */
	if (event->attr.exclude_user	||
	    event->attr.exclude_kernel	||
	    event->attr.exclude_hv	||
	    event->attr.exclude_idle	||
	    event->attr.exclude_host	||
	    event->attr.exclude_guest)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	tx2_pmu = pmu_to_tx2_pmu(event->pmu);
	if (tx2_pmu->cpu >= nr_cpu_ids)
		return -EINVAL;
	event->cpu = tx2_pmu->cpu;

	if (event->attr.config >= tx2_pmu->max_events)
		return -EINVAL;

	/* store event id */
	hwc->config = event->attr.config;

	/* Validate the group */
	if (!tx2_uncore_validate_event_group(event))
		return -EINVAL;

	return 0;
}

static void tx2_uncore_event_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct tx2_uncore_pmu *tx2_pmu;

	hwc->state = 0;
	tx2_pmu = pmu_to_tx2_pmu(event->pmu);

	tx2_pmu->start_event(event, flags);
	perf_event_update_userpage(event);

	/* Start the timer for the first event */
	if (bitmap_weight(tx2_pmu->active_counters,
				tx2_pmu->max_counters) == 1) {
		hrtimer_start(&tx2_pmu->hrtimer,
			ns_to_ktime(tx2_pmu->hrtimer_interval),
			HRTIMER_MODE_REL_PINNED);
	}
}
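
/*
 * Note (added comment): later events on the same PMU reuse the timer
 * started above; it keeps firing until the last counter is freed and
 * tx2_hrtimer_callback() returns HRTIMER_NORESTART.
 */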

static void tx2_uncore_event_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct tx2_uncore_pmu *tx2_pmu;

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	tx2_pmu = pmu_to_tx2_pmu(event->pmu);
	tx2_pmu->stop_event(event);
	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
	hwc->state |= PERF_HES_STOPPED;
	if (flags & PERF_EF_UPDATE) {
		tx2_uncore_event_update(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

static int tx2_uncore_event_add(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct tx2_uncore_pmu *tx2_pmu;

	tx2_pmu = pmu_to_tx2_pmu(event->pmu);

	/* Allocate a free counter */
	hwc->idx = alloc_counter(tx2_pmu);
	if (hwc->idx < 0)
		return -EAGAIN;

	tx2_pmu->events[hwc->idx] = event;
	/* set counter control and data registers' base addresses */
	tx2_pmu->init_cntr_base(event, tx2_pmu);

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (flags & PERF_EF_START)
		tx2_uncore_event_start(event, flags);

	return 0;
}

static void tx2_uncore_event_del(struct perf_event *event, int flags)
{
	struct tx2_uncore_pmu *tx2_pmu = pmu_to_tx2_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	tx2_uncore_event_stop(event, PERF_EF_UPDATE);

	/* clear the assigned counter */
	free_counter(tx2_pmu, GET_COUNTERID(event));

	perf_event_update_userpage(event);
	tx2_pmu->events[hwc->idx] = NULL;
	hwc->idx = -1;
}

static void tx2_uncore_event_read(struct perf_event *event)
{
	tx2_uncore_event_update(event);
}

static enum hrtimer_restart tx2_hrtimer_callback(struct hrtimer *timer)
{
	struct tx2_uncore_pmu *tx2_pmu;
	int max_counters, idx;

	tx2_pmu = container_of(timer, struct tx2_uncore_pmu, hrtimer);
	max_counters = tx2_pmu->max_counters;

	if (bitmap_empty(tx2_pmu->active_counters, max_counters))
		return HRTIMER_NORESTART;

	for_each_set_bit(idx, tx2_pmu->active_counters, max_counters) {
		struct perf_event *event = tx2_pmu->events[idx];

		tx2_uncore_event_update(event);
	}
	hrtimer_forward_now(timer, ns_to_ktime(tx2_pmu->hrtimer_interval));
	return HRTIMER_RESTART;
}

static int tx2_uncore_pmu_register(
		struct tx2_uncore_pmu *tx2_pmu)
{
	struct device *dev = tx2_pmu->dev;
	char *name = tx2_pmu->name;

	/* Perf event registration */
	tx2_pmu->pmu = (struct pmu) {
		.module		= THIS_MODULE,
		.attr_groups	= tx2_pmu->attr_groups,
		.task_ctx_nr	= perf_invalid_context,
		.event_init	= tx2_uncore_event_init,
		.add		= tx2_uncore_event_add,
		.del		= tx2_uncore_event_del,
		.start		= tx2_uncore_event_start,
		.stop		= tx2_uncore_event_stop,
		.read		= tx2_uncore_event_read,
	};

	tx2_pmu->pmu.name = devm_kasprintf(dev, GFP_KERNEL,
			"%s", name);

	return perf_pmu_register(&tx2_pmu->pmu, tx2_pmu->pmu.name, -1);
}
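
/*
 * Note (added comment): task_ctx_nr = perf_invalid_context marks this
 * as a system-wide (uncore) PMU, so perf core rejects per-task use,
 * matching the PERF_ATTACH_TASK check in tx2_uncore_event_init().
 */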

static int tx2_uncore_pmu_add_dev(struct tx2_uncore_pmu *tx2_pmu)
{
	int ret, cpu;

	cpu = cpumask_any_and(cpumask_of_node(tx2_pmu->node),
			cpu_online_mask);

	tx2_pmu->cpu = cpu;
	hrtimer_init(&tx2_pmu->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	tx2_pmu->hrtimer.function = tx2_hrtimer_callback;

	ret = tx2_uncore_pmu_register(tx2_pmu);
	if (ret) {
		dev_err(tx2_pmu->dev, "%s PMU: Failed to init driver\n",
				tx2_pmu->name);
		return -ENODEV;
	}

	/* register hotplug callback for the pmu */
	ret = cpuhp_state_add_instance(
			CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
			&tx2_pmu->hpnode);
	if (ret) {
		dev_err(tx2_pmu->dev, "Error %d registering hotplug", ret);
		return ret;
	}

	/* Add to the list */
	list_add(&tx2_pmu->entry, &tx2_pmus);

	dev_dbg(tx2_pmu->dev, "%s PMU UNCORE registered\n",
			tx2_pmu->pmu.name);
	return ret;
}

static struct tx2_uncore_pmu *tx2_uncore_pmu_init_dev(struct device *dev,
		acpi_handle handle, struct acpi_device *adev, u32 type)
{
	struct tx2_uncore_pmu *tx2_pmu;
	void __iomem *base;
	struct resource res;
	struct resource_entry *rentry;
	struct list_head list;
	int ret;

	INIT_LIST_HEAD(&list);
	ret = acpi_dev_get_resources(adev, &list, NULL, NULL);
	if (ret <= 0) {
		dev_err(dev, "failed to parse _CRS method, error %d\n", ret);
		return NULL;
	}

	list_for_each_entry(rentry, &list, node) {
		if (resource_type(rentry->res) == IORESOURCE_MEM) {
			res = *rentry->res;
			break;
		}
	}

	if (!rentry->res)
		return NULL;

	acpi_dev_free_resource_list(&list);
	base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(base)) {
		dev_err(dev, "PMU type %d: Fail to map resource\n", type);
		return NULL;
	}

	tx2_pmu = devm_kzalloc(dev, sizeof(*tx2_pmu), GFP_KERNEL);
	if (!tx2_pmu)
		return NULL;

	tx2_pmu->dev = dev;
	tx2_pmu->type = type;
	tx2_pmu->base = base;
	tx2_pmu->node = dev_to_node(dev);
	INIT_LIST_HEAD(&tx2_pmu->entry);

	switch (tx2_pmu->type) {
	case PMU_TYPE_L3C:
		tx2_pmu->max_counters = TX2_PMU_MAX_COUNTERS;
		tx2_pmu->prorate_factor = TX2_PMU_L3_TILES;
		tx2_pmu->max_events = L3_EVENT_MAX;
		tx2_pmu->hrtimer_interval = TX2_PMU_HRTIMER_INTERVAL;
		tx2_pmu->attr_groups = l3c_pmu_attr_groups;
		tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
				"uncore_l3c_%d", tx2_pmu->node);
		tx2_pmu->init_cntr_base = init_cntr_base_l3c;
		tx2_pmu->start_event = uncore_start_event_l3c;
		tx2_pmu->stop_event = uncore_stop_event_l3c;
		break;
	case PMU_TYPE_DMC:
		tx2_pmu->max_counters = TX2_PMU_MAX_COUNTERS;
		tx2_pmu->prorate_factor = TX2_PMU_DMC_CHANNELS;
		tx2_pmu->max_events = DMC_EVENT_MAX;
		tx2_pmu->hrtimer_interval = TX2_PMU_HRTIMER_INTERVAL;
		tx2_pmu->attr_groups = dmc_pmu_attr_groups;
		tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
				"uncore_dmc_%d", tx2_pmu->node);
		tx2_pmu->init_cntr_base = init_cntr_base_dmc;
		tx2_pmu->start_event = uncore_start_event_dmc;
		tx2_pmu->stop_event = uncore_stop_event_dmc;
		break;
	case PMU_TYPE_INVALID:
		devm_kfree(dev, tx2_pmu);
		tx2_pmu = NULL;
		break;
	}

	return tx2_pmu;
}

static acpi_status tx2_uncore_pmu_add(acpi_handle handle, u32 level,
		void *data, void **return_value)
{
	struct tx2_uncore_pmu *tx2_pmu;
	struct acpi_device *adev;
	enum tx2_uncore_type type;

	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;
	if (acpi_bus_get_status(adev) || !adev->status.present)
		return AE_OK;

	type = get_tx2_pmu_type(adev);
	if (type == PMU_TYPE_INVALID)
		return AE_OK;

	tx2_pmu = tx2_uncore_pmu_init_dev((struct device *)data,
			handle, adev, type);

	if (!tx2_pmu)
		return AE_ERROR;

	if (tx2_uncore_pmu_add_dev(tx2_pmu)) {
		/* Can't add the PMU device, abort */
		return AE_ERROR;
	}
	return AE_OK;
}

static int tx2_uncore_pmu_online_cpu(unsigned int cpu,
		struct hlist_node *hpnode)
{
	struct tx2_uncore_pmu *tx2_pmu;

	tx2_pmu = hlist_entry_safe(hpnode,
			struct tx2_uncore_pmu, hpnode);

	/* Pick this CPU if there is no CPU/PMU association yet and both
	 * are from the same node.
	 */
	if ((tx2_pmu->cpu >= nr_cpu_ids) &&
		(tx2_pmu->node == cpu_to_node(cpu)))
		tx2_pmu->cpu = cpu;

	return 0;
}

static int tx2_uncore_pmu_offline_cpu(unsigned int cpu,
		struct hlist_node *hpnode)
{
	int new_cpu;
	struct tx2_uncore_pmu *tx2_pmu;
	struct cpumask cpu_online_mask_temp;

	tx2_pmu = hlist_entry_safe(hpnode,
			struct tx2_uncore_pmu, hpnode);

	if (cpu != tx2_pmu->cpu)
		return 0;

	hrtimer_cancel(&tx2_pmu->hrtimer);
	cpumask_copy(&cpu_online_mask_temp, cpu_online_mask);
	cpumask_clear_cpu(cpu, &cpu_online_mask_temp);
	new_cpu = cpumask_any_and(
			cpumask_of_node(tx2_pmu->node),
			&cpu_online_mask_temp);

	tx2_pmu->cpu = new_cpu;
	if (new_cpu >= nr_cpu_ids)
		return 0;
	perf_pmu_migrate_context(&tx2_pmu->pmu, cpu, new_cpu);

	return 0;
}
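
/*
 * Note (added comment): events follow the designated CPU. When that CPU
 * goes offline, perf_pmu_migrate_context() moves the active events to
 * another online CPU on the same node, if one exists; otherwise counting
 * stops until tx2_uncore_pmu_online_cpu() picks a new CPU for this node.
 */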

static const struct acpi_device_id tx2_uncore_acpi_match[] = {
	{"CAV901C", 0},
	{},
};
MODULE_DEVICE_TABLE(acpi, tx2_uncore_acpi_match);

static int tx2_uncore_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	acpi_handle handle;
	acpi_status status;

	set_dev_node(dev, acpi_get_node(ACPI_HANDLE(dev)));

	if (!has_acpi_companion(dev))
		return -ENODEV;

	handle = ACPI_HANDLE(dev);
	if (!handle)
		return -EINVAL;

	/* Walk the tree for all PMU UNCORE devices */
	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
				     tx2_uncore_pmu_add,
				     NULL, dev, NULL);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to probe PMU devices\n");
		return_ACPI_STATUS(status);
	}

	dev_info(dev, "node%d: pmu uncore registered\n", dev_to_node(dev));
	return 0;
}

static int tx2_uncore_remove(struct platform_device *pdev)
{
	struct tx2_uncore_pmu *tx2_pmu, *temp;
	struct device *dev = &pdev->dev;

	if (!list_empty(&tx2_pmus)) {
		list_for_each_entry_safe(tx2_pmu, temp, &tx2_pmus, entry) {
			if (tx2_pmu->node == dev_to_node(dev)) {
				cpuhp_state_remove_instance_nocalls(
					CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
					&tx2_pmu->hpnode);
				perf_pmu_unregister(&tx2_pmu->pmu);
				list_del(&tx2_pmu->entry);
			}
		}
	}
	return 0;
}

static struct platform_driver tx2_uncore_driver = {
	.driver = {
		.name		= "tx2-uncore-pmu",
		.acpi_match_table = ACPI_PTR(tx2_uncore_acpi_match),
	},
	.probe = tx2_uncore_probe,
	.remove = tx2_uncore_remove,
};

static int __init tx2_uncore_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
				      "perf/tx2/uncore:online",
				      tx2_uncore_pmu_online_cpu,
				      tx2_uncore_pmu_offline_cpu);
	if (ret) {
		pr_err("TX2 PMU: setup hotplug failed(%d)\n", ret);
		return ret;
	}
	ret = platform_driver_register(&tx2_uncore_driver);
	if (ret)
		cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE);

	return ret;
}
module_init(tx2_uncore_driver_init);

static void __exit tx2_uncore_driver_exit(void)
{
	platform_driver_unregister(&tx2_uncore_driver);
	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE);
}
module_exit(tx2_uncore_driver_exit);

MODULE_DESCRIPTION("ThunderX2 UNCORE PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Ganapatrao Kulkarni <gkulkarni@cavium.com>");