// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "uncore.h"
#include "uncore_discovery.h"

static bool uncore_no_discover;
module_param(uncore_no_discover, bool, 0);
MODULE_PARM_DESC(uncore_no_discover, "Don't enable the Intel uncore PerfMon discovery mechanism "
				     "(default: enable the discovery mechanism).");

struct intel_uncore_type *empty_uncore[] = { NULL, };
struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
struct intel_uncore_type **uncore_pci_uncores = empty_uncore;
struct intel_uncore_type **uncore_mmio_uncores = empty_uncore;

static bool pcidrv_registered;
struct pci_driver *uncore_pci_driver;
/* The PCI driver for the device which the uncore doesn't own. */
struct pci_driver *uncore_pci_sub_driver;
/* pci bus to socket mapping */
DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
struct pci_extra_dev *uncore_extra_pci_dev;
int __uncore_max_dies;

/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;

/* constraint for the fixed counter */
static struct event_constraint uncore_constraint_fixed =
	EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
struct event_constraint uncore_constraint_empty =
	EVENT_CONSTRAINT(0, 0, 0);

MODULE_LICENSE("GPL");
int uncore_pcibus_to_dieid(struct pci_bus *bus)
{
	struct pci2phy_map *map;
	int die_id = -1;

	raw_spin_lock(&pci2phy_map_lock);
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == pci_domain_nr(bus)) {
			die_id = map->pbus_to_dieid[bus->number];
			break;
		}
	}
	raw_spin_unlock(&pci2phy_map_lock);

	return die_id;
}
int uncore_die_to_segment(int die)
{
	struct pci_bus *bus = NULL;

	/* Find first pci bus which attributes to specified die. */
	while ((bus = pci_find_next_bus(bus)) &&
	       (die != uncore_pcibus_to_dieid(bus)))
		;

	return bus ? pci_domain_nr(bus) : -EINVAL;
}
int uncore_device_to_die(struct pci_dev *dev)
{
	int node = pcibus_to_node(dev->bus);
	int cpu;

	for_each_cpu(cpu, cpumask_of_pcibus(dev->bus)) {
		struct cpuinfo_x86 *c = &cpu_data(cpu);

		if (c->initialized && cpu_to_node(cpu) == node)
			return c->topo.logical_die_id;
	}

	return -1;
}
static void uncore_free_pcibus_map(void)
{
	struct pci2phy_map *map, *tmp;

	list_for_each_entry_safe(map, tmp, &pci2phy_map_head, list) {
		list_del(&map->list);
		kfree(map);
	}
}
struct pci2phy_map *__find_pci2phy_map(int segment)
{
	struct pci2phy_map *map, *alloc = NULL;
	int i;

	lockdep_assert_held(&pci2phy_map_lock);

lookup:
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == segment)
			goto end;
	}

	if (!alloc) {
		/* Drop the lock around the sleeping allocation, then retry the lookup. */
		raw_spin_unlock(&pci2phy_map_lock);
		alloc = kmalloc(sizeof(struct pci2phy_map), GFP_KERNEL);
		raw_spin_lock(&pci2phy_map_lock);
		if (!alloc)
			return NULL;
		goto lookup;
	}

	map = alloc;
	alloc = NULL;
	map->segment = segment;
	for (i = 0; i < 256; i++)
		map->pbus_to_dieid[i] = -1;
	list_add_tail(&map->list, &pci2phy_map_head);

end:
	kfree(alloc);
	return map;
}
ssize_t uncore_event_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct uncore_event_desc *event =
		container_of(attr, struct uncore_event_desc, attr);

	return sprintf(buf, "%s", event->config);
}
struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
	unsigned int dieid = topology_logical_die_id(cpu);

	/*
	 * The unsigned check also catches the '-1' return value for non
	 * existent mappings in the topology map.
	 */
	return dieid < uncore_max_dies() ? pmu->boxes[dieid] : NULL;
}

u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 count;

	rdmsrl(event->hw.event_base, count);

	return count;
}
void uncore_mmio_exit_box(struct intel_uncore_box *box)
{
	if (box->io_addr)
		iounmap(box->io_addr);
}

u64 uncore_mmio_read_counter(struct intel_uncore_box *box,
			     struct perf_event *event)
{
	if (!box->io_addr)
		return 0;

	if (!uncore_mmio_is_valid_offset(box, event->hw.event_base))
		return 0;

	return readq(box->io_addr + event->hw.event_base);
}
/*
 * generic get constraint function for shared match/mask registers.
 */
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	unsigned long flags;
	bool ok = false;

	/*
	 * reg->alloc can be set due to existing state, so for fake box we
	 * need to ignore this, otherwise we might fail to allocate proper
	 * fake state for this extra reg constraint.
	 */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;

	er = &box->shared_regs[reg1->idx];
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!atomic_read(&er->ref) ||
	    (er->config1 == reg1->config && er->config2 == reg2->config)) {
		atomic_inc(&er->ref);
		er->config1 = reg1->config;
		er->config2 = reg2->config;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (ok) {
		if (!uncore_box_is_fake(box))
			reg1->alloc = 1;

		return NULL;
	}

	return &uncore_constraint_empty;
}
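/*
 * Usage note for the shared match/mask register protocol above: two events
 * may share the same extra register only while they program identical
 * config1/config2 values.  er->ref counts how many live events currently
 * depend on that programming; the matching uncore_put_constraint() call
 * drops the reservation so a different value can be programmed later.
 */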
void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

	/*
	 * Only put constraint if extra reg was actually allocated. Also
	 * takes care of events which do not use an extra shared reg.
	 *
	 * Also, if this is a fake box we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent box
	 * state either since it will be thrown out.
	 */
	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	er = &box->shared_regs[reg1->idx];
	atomic_dec(&er->ref);
	reg1->alloc = 0;
}
u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	u64 config;

	er = &box->shared_regs[idx];

	raw_spin_lock_irqsave(&er->lock, flags);
	config = er->config1;
	raw_spin_unlock_irqrestore(&er->lock, flags);

	return config;
}
static void uncore_assign_hw_event(struct intel_uncore_box *box,
				   struct perf_event *event, int idx)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = idx;
	hwc->last_tag = ++box->tags[idx];

	if (uncore_pmc_fixed(hwc->idx)) {
		hwc->event_base = uncore_fixed_ctr(box);
		hwc->config_base = uncore_fixed_ctl(box);
		return;
	}

	hwc->config_base = uncore_event_ctl(box, hwc->idx);
	hwc->event_base = uncore_perf_ctr(box, hwc->idx);
}
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 prev_count, new_count, delta;
	int shift;

	if (uncore_pmc_freerunning(event->hw.idx))
		shift = 64 - uncore_freerunning_bits(box, event);
	else if (uncore_pmc_fixed(event->hw.idx))
		shift = 64 - uncore_fixed_ctr_bits(box);
	else
		shift = 64 - uncore_perf_ctr_bits(box);

	/* the hrtimer might modify the previous event value */
again:
	prev_count = local64_read(&event->hw.prev_count);
	new_count = uncore_read_counter(box, event);
	if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
		goto again;

	delta = (new_count << shift) - (prev_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}
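/*
 * Example of the shift arithmetic above: a 48-bit wide counter gives
 * shift = 64 - 48 = 16.  Shifting both snapshots left by 16 aligns the
 * counter's MSB with bit 63, so the subtraction wraps correctly even if
 * the hardware counter rolled over once between the two reads; shifting
 * the difference back right by 16 recovers the unsigned 48-bit delta.
 */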
/*
 * The overflow interrupt is unavailable for SandyBridge-EP, is broken
 * for SandyBridge. So we use hrtimer to periodically poll the counter
 * to avoid overflow.
 */
static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
{
	struct intel_uncore_box *box;
	struct perf_event *event;
	unsigned long flags;
	int bit;

	box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
	if (!box->n_active || box->cpu != smp_processor_id())
		return HRTIMER_NORESTART;
	/*
	 * disable local interrupt to prevent uncore_pmu_event_start/stop
	 * from interrupting the update process
	 */
	local_irq_save(flags);

	/*
	 * handle boxes with an active event list as opposed to active
	 * counters
	 */
	list_for_each_entry(event, &box->active_list, active_entry) {
		uncore_perf_event_update(box, event);
	}

	for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
		uncore_perf_event_update(box, box->events[bit]);

	local_irq_restore(flags);

	hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
	return HRTIMER_RESTART;
}
void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
		      HRTIMER_MODE_REL_PINNED);
}

void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_cancel(&box->hrtimer);
}

static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	box->hrtimer.function = uncore_pmu_hrtimer;
}
static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
						 int node)
{
	int i, size, numshared = type->num_shared_regs;
	struct intel_uncore_box *box;

	size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg);

	box = kzalloc_node(size, GFP_KERNEL, node);
	if (!box)
		return NULL;

	for (i = 0; i < numshared; i++)
		raw_spin_lock_init(&box->shared_regs[i].lock);

	uncore_pmu_init_hrtimer(box);
	box->cpu = -1;
	box->dieid = -1;

	/* set default hrtimer timeout */
	box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;

	INIT_LIST_HEAD(&box->active_list);

	return box;
}
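/*
 * The default polling period assigned above (UNCORE_PMU_HRTIMER_INTERVAL)
 * has to be short enough that no enabled counter can wrap more than once
 * between two hrtimer callbacks; otherwise uncore_perf_event_update()
 * would fold the missed wrap into a too-small delta.
 */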
/*
 * Using uncore_pmu_event_init pmu event_init callback
 * as a detection point for uncore events.
 */
static int uncore_pmu_event_init(struct perf_event *event);

static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event)
{
	return &box->pmu->pmu == event->pmu;
}
389 uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
392 struct perf_event *event;
395 max_count = box->pmu->type->num_counters;
396 if (box->pmu->type->fixed_ctl)
399 if (box->n_events >= max_count)
404 if (is_box_event(box, leader)) {
405 box->event_list[n] = leader;
412 for_each_sibling_event(event, leader) {
413 if (!is_box_event(box, event) ||
414 event->state <= PERF_EVENT_STATE_OFF)
420 box->event_list[n] = event;
static struct event_constraint *
uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct event_constraint *c;

	if (type->ops->get_constraint) {
		c = type->ops->get_constraint(box, event);
		if (c)
			return c;
	}

	if (event->attr.config == UNCORE_FIXED_EVENT)
		return &uncore_constraint_fixed;

	if (type->constraints) {
		for_each_event_constraint(c, type->constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &type->unconstrainted;
}

static void uncore_put_event_constraint(struct intel_uncore_box *box,
					struct perf_event *event)
{
	if (box->pmu->type->ops->put_constraint)
		box->pmu->type->ops->put_constraint(box, event);
}
static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
{
	unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	struct event_constraint *c;
	int i, wmin, wmax, ret = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);

	for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
		c = uncore_get_event_constraint(box, box->event_list[i]);
		box->event_constraint[i] = c;
		wmin = min(wmin, c->weight);
		wmax = max(wmax, c->weight);
	}

	/* fastpath, try to reuse previous register */
	for (i = 0; i < n; i++) {
		hwc = &box->event_list[i]->hw;
		c = box->event_constraint[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	/* slow path */
	if (i != n)
		ret = perf_assign_events(box->event_constraint, n,
					 wmin, wmax, n, assign);

	if (!assign || ret) {
		for (i = 0; i < n; i++)
			uncore_put_event_constraint(box, box->event_list[i]);
	}

	return ret ? -EINVAL : 0;
}
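/*
 * Scheduling summary for uncore_assign_events(): the fastpath keeps every
 * event on the counter it already occupies as long as its constraint still
 * permits that index and no other event in this batch claimed it; only when
 * that fails does the generic perf_assign_events() weight-ordered matching
 * run to search for a complete assignment.
 */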
507 void uncore_pmu_event_start(struct perf_event *event, int flags)
509 struct intel_uncore_box *box = uncore_event_to_box(event);
510 int idx = event->hw.idx;
512 if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
	/*
	 * Free running counter is read-only and always active.
	 * Use the current counter value as start point.
	 * There is no overflow interrupt for free running counter.
	 * Use hrtimer to periodically poll the counter to avoid overflow.
	 */
521 if (uncore_pmc_freerunning(event->hw.idx)) {
522 list_add_tail(&event->active_entry, &box->active_list);
523 local64_set(&event->hw.prev_count,
524 uncore_read_counter(box, event));
525 if (box->n_active++ == 0)
526 uncore_pmu_start_hrtimer(box);
530 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
534 box->events[idx] = event;
536 __set_bit(idx, box->active_mask);
538 local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
539 uncore_enable_event(box, event);
541 if (box->n_active == 1)
542 uncore_pmu_start_hrtimer(box);
545 void uncore_pmu_event_stop(struct perf_event *event, int flags)
547 struct intel_uncore_box *box = uncore_event_to_box(event);
548 struct hw_perf_event *hwc = &event->hw;
550 /* Cannot disable free running counter which is read-only */
551 if (uncore_pmc_freerunning(hwc->idx)) {
552 list_del(&event->active_entry);
553 if (--box->n_active == 0)
554 uncore_pmu_cancel_hrtimer(box);
555 uncore_perf_event_update(box, event);
559 if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
560 uncore_disable_event(box, event);
562 box->events[hwc->idx] = NULL;
563 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
564 hwc->state |= PERF_HES_STOPPED;
566 if (box->n_active == 0)
567 uncore_pmu_cancel_hrtimer(box);
570 if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
575 uncore_perf_event_update(box, event);
576 hwc->state |= PERF_HES_UPTODATE;
580 int uncore_pmu_event_add(struct perf_event *event, int flags)
582 struct intel_uncore_box *box = uncore_event_to_box(event);
583 struct hw_perf_event *hwc = &event->hw;
584 int assign[UNCORE_PMC_IDX_MAX];
	/*
	 * The free running counter is assigned in event_init().
	 * The free running counter event and free running counter
	 * are 1:1 mapped. It doesn't need to be tracked in event_list.
	 */
595 if (uncore_pmc_freerunning(hwc->idx)) {
596 if (flags & PERF_EF_START)
597 uncore_pmu_event_start(event, 0);
601 ret = n = uncore_collect_events(box, event, false);
605 hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
606 if (!(flags & PERF_EF_START))
607 hwc->state |= PERF_HES_ARCH;
609 ret = uncore_assign_events(box, assign, n);
613 /* save events moving to new counters */
614 for (i = 0; i < box->n_events; i++) {
615 event = box->event_list[i];
618 if (hwc->idx == assign[i] &&
619 hwc->last_tag == box->tags[assign[i]])
		/*
		 * Ensure we don't accidentally enable a stopped
		 * counter simply because we rescheduled.
		 */
625 if (hwc->state & PERF_HES_STOPPED)
626 hwc->state |= PERF_HES_ARCH;
628 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
631 /* reprogram moved events into new counters */
632 for (i = 0; i < n; i++) {
633 event = box->event_list[i];
636 if (hwc->idx != assign[i] ||
637 hwc->last_tag != box->tags[assign[i]])
638 uncore_assign_hw_event(box, event, assign[i]);
639 else if (i < box->n_events)
642 if (hwc->state & PERF_HES_ARCH)
645 uncore_pmu_event_start(event, 0);
652 void uncore_pmu_event_del(struct perf_event *event, int flags)
654 struct intel_uncore_box *box = uncore_event_to_box(event);
657 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
	/*
	 * The event for the free running counter is not tracked by event_list.
	 * It doesn't need to force event->hw.idx = -1 to reassign the counter,
	 * because the event and the free running counter are 1:1 mapped.
	 */
664 if (uncore_pmc_freerunning(event->hw.idx))
667 for (i = 0; i < box->n_events; i++) {
668 if (event == box->event_list[i]) {
669 uncore_put_event_constraint(box, event);
671 for (++i; i < box->n_events; i++)
672 box->event_list[i - 1] = box->event_list[i];
680 event->hw.last_tag = ~0ULL;
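	/*
	 * hw.last_tag is poisoned above so that a re-added event fails the
	 * "same counter, same tag" fastpath check in uncore_pmu_event_add()
	 * and gets reprogrammed from scratch.
	 */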
683 void uncore_pmu_event_read(struct perf_event *event)
685 struct intel_uncore_box *box = uncore_event_to_box(event);
686 uncore_perf_event_update(box, event);
/*
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
693 static int uncore_validate_group(struct intel_uncore_pmu *pmu,
694 struct perf_event *event)
696 struct perf_event *leader = event->group_leader;
697 struct intel_uncore_box *fake_box;
698 int ret = -EINVAL, n;
700 /* The free running counter is always active. */
701 if (uncore_pmc_freerunning(event->hw.idx))
704 fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
	/*
	 * The event is not yet connected with its
	 * siblings, therefore we must first collect
	 * existing siblings, then add the new event
	 * before we can simulate the scheduling.
	 */
715 n = uncore_collect_events(fake_box, leader, true);
719 fake_box->n_events = n;
720 n = uncore_collect_events(fake_box, event, false);
724 fake_box->n_events = n;
726 ret = uncore_assign_events(fake_box, NULL, n);
732 static int uncore_pmu_event_init(struct perf_event *event)
734 struct intel_uncore_pmu *pmu;
735 struct intel_uncore_box *box;
736 struct hw_perf_event *hwc = &event->hw;
739 if (event->attr.type != event->pmu->type)
742 pmu = uncore_event_to_pmu(event);
743 /* no device found for this pmu */
744 if (pmu->func_id < 0)
747 /* Sampling not supported yet */
748 if (hwc->sample_period)
	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu.
	 */
757 box = uncore_pmu_to_box(pmu, event->cpu);
758 if (!box || box->cpu < 0)
760 event->cpu = box->cpu;
761 event->pmu_private = box;
763 event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;
766 event->hw.last_tag = ~0ULL;
767 event->hw.extra_reg.idx = EXTRA_REG_NONE;
768 event->hw.branch_reg.idx = EXTRA_REG_NONE;
770 if (event->attr.config == UNCORE_FIXED_EVENT) {
771 /* no fixed counter */
772 if (!pmu->type->fixed_ctl)
		/*
		 * if there is only one fixed counter, only the first pmu
		 * can access the fixed counter
		 */
778 if (pmu->type->single_fixed && pmu->pmu_idx > 0)
781 /* fixed counters have event field hardcoded to zero */
783 } else if (is_freerunning_event(event)) {
784 hwc->config = event->attr.config;
785 if (!check_valid_freerunning_event(box, event))
787 event->hw.idx = UNCORE_PMC_IDX_FREERUNNING;
		/*
		 * The free running counter event and free running counter
		 * are always 1:1 mapped.
		 * The free running counter is always active.
		 * Assign the free running counter here.
		 */
794 event->hw.event_base = uncore_freerunning_counter(box, event);
796 hwc->config = event->attr.config &
797 (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32));
798 if (pmu->type->ops->hw_config) {
799 ret = pmu->type->ops->hw_config(box, event);
805 if (event->group_leader != event)
806 ret = uncore_validate_group(pmu, event);
813 static void uncore_pmu_enable(struct pmu *pmu)
815 struct intel_uncore_pmu *uncore_pmu;
816 struct intel_uncore_box *box;
818 uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
820 box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
824 if (uncore_pmu->type->ops->enable_box)
825 uncore_pmu->type->ops->enable_box(box);
828 static void uncore_pmu_disable(struct pmu *pmu)
830 struct intel_uncore_pmu *uncore_pmu;
831 struct intel_uncore_box *box;
833 uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
835 box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
839 if (uncore_pmu->type->ops->disable_box)
840 uncore_pmu->type->ops->disable_box(box);
843 static ssize_t uncore_get_attr_cpumask(struct device *dev,
844 struct device_attribute *attr, char *buf)
846 return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask);
849 static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);
851 static struct attribute *uncore_pmu_attrs[] = {
852 &dev_attr_cpumask.attr,
856 static const struct attribute_group uncore_pmu_attr_group = {
857 .attrs = uncore_pmu_attrs,
860 static inline int uncore_get_box_id(struct intel_uncore_type *type,
861 struct intel_uncore_pmu *pmu)
863 return type->box_ids ? type->box_ids[pmu->pmu_idx] : pmu->pmu_idx;
866 void uncore_get_alias_name(char *pmu_name, struct intel_uncore_pmu *pmu)
868 struct intel_uncore_type *type = pmu->type;
870 if (type->num_boxes == 1)
871 sprintf(pmu_name, "uncore_type_%u", type->type_id);
873 sprintf(pmu_name, "uncore_type_%u_%d",
874 type->type_id, uncore_get_box_id(type, pmu));
878 static void uncore_get_pmu_name(struct intel_uncore_pmu *pmu)
880 struct intel_uncore_type *type = pmu->type;
	/*
	 * No uncore block name in discovery table.
	 * Use uncore_type_&typeid_&boxid as name.
	 */
	if (!type->name) {
		uncore_get_alias_name(pmu->name, pmu);
		return;
	}
891 if (type->num_boxes == 1) {
892 if (strlen(type->name) > 0)
893 sprintf(pmu->name, "uncore_%s", type->name);
895 sprintf(pmu->name, "uncore");
898 * Use the box ID from the discovery table if applicable.
900 sprintf(pmu->name, "uncore_%s_%d", type->name,
901 uncore_get_box_id(type, pmu));
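/*
 * Resulting PMU names, for example: a type with a single box registers a
 * plain "uncore_<name>" PMU (e.g. "uncore_imc"), multi-box types register
 * "uncore_<name>_<boxid>" (e.g. "uncore_cha_0"), and nameless units from
 * the discovery table fall back to the "uncore_type_<id>_<boxid>" alias.
 */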
905 static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
909 if (!pmu->type->pmu) {
910 pmu->pmu = (struct pmu) {
911 .attr_groups = pmu->type->attr_groups,
912 .task_ctx_nr = perf_invalid_context,
913 .pmu_enable = uncore_pmu_enable,
914 .pmu_disable = uncore_pmu_disable,
915 .event_init = uncore_pmu_event_init,
916 .add = uncore_pmu_event_add,
917 .del = uncore_pmu_event_del,
918 .start = uncore_pmu_event_start,
919 .stop = uncore_pmu_event_stop,
920 .read = uncore_pmu_event_read,
921 .module = THIS_MODULE,
922 .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
923 .attr_update = pmu->type->attr_update,
926 pmu->pmu = *pmu->type->pmu;
927 pmu->pmu.attr_groups = pmu->type->attr_groups;
928 pmu->pmu.attr_update = pmu->type->attr_update;
931 uncore_get_pmu_name(pmu);
933 ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
935 pmu->registered = true;
939 static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
941 if (!pmu->registered)
943 perf_pmu_unregister(&pmu->pmu);
944 pmu->registered = false;
947 static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
951 for (die = 0; die < uncore_max_dies(); die++)
952 kfree(pmu->boxes[die]);
956 static void uncore_type_exit(struct intel_uncore_type *type)
958 struct intel_uncore_pmu *pmu = type->pmus;
961 if (type->cleanup_mapping)
962 type->cleanup_mapping(type);
965 for (i = 0; i < type->num_boxes; i++, pmu++) {
966 uncore_pmu_unregister(pmu);
967 uncore_free_boxes(pmu);
973 kfree(type->box_ids);
974 type->box_ids = NULL;
976 kfree(type->events_group);
977 type->events_group = NULL;
980 static void uncore_types_exit(struct intel_uncore_type **types)
982 for (; *types; types++)
983 uncore_type_exit(*types);
986 static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
988 struct intel_uncore_pmu *pmus;
992 pmus = kcalloc(type->num_boxes, sizeof(*pmus), GFP_KERNEL);
996 size = uncore_max_dies() * sizeof(struct intel_uncore_box *);
998 for (i = 0; i < type->num_boxes; i++) {
999 pmus[i].func_id = setid ? i : -1;
1000 pmus[i].pmu_idx = i;
1001 pmus[i].type = type;
1002 pmus[i].boxes = kzalloc(size, GFP_KERNEL);
1008 type->unconstrainted = (struct event_constraint)
1009 __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
1010 0, type->num_counters, 0, 0);
1012 if (type->event_descs) {
1014 struct attribute_group group;
1015 struct attribute *attrs[];
1017 for (i = 0; type->event_descs[i].attr.attr.name; i++);
1019 attr_group = kzalloc(struct_size(attr_group, attrs, i + 1),
1024 attr_group->group.name = "events";
1025 attr_group->group.attrs = attr_group->attrs;
1027 for (j = 0; j < i; j++)
1028 attr_group->attrs[j] = &type->event_descs[j].attr.attr;
1030 type->events_group = &attr_group->group;
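	/*
	 * Each attribute collected into this "events" group shows up as
	 * /sys/bus/event_source/devices/<pmu>/events/<event>, and reading it
	 * returns the config string emitted by uncore_event_show().
	 */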
1033 type->pmu_group = &uncore_pmu_attr_group;
1035 if (type->set_mapping)
1036 type->set_mapping(type);
1041 for (i = 0; i < type->num_boxes; i++)
1042 kfree(pmus[i].boxes);
1049 uncore_types_init(struct intel_uncore_type **types, bool setid)
1053 for (; *types; types++) {
1054 ret = uncore_type_init(*types, setid);
/*
 * Get the die information of a PCI device.
 * @pdev: The PCI device.
 * @die: The die id which the device maps to.
 */
1066 static int uncore_pci_get_dev_die_info(struct pci_dev *pdev, int *die)
1068 *die = uncore_pcibus_to_dieid(pdev->bus);
1075 static struct intel_uncore_pmu *
1076 uncore_pci_find_dev_pmu_from_types(struct pci_dev *pdev)
1078 struct intel_uncore_type **types = uncore_pci_uncores;
1079 struct intel_uncore_type *type;
1083 for (; *types; types++) {
1085 for (die = 0; die < __uncore_max_dies; die++) {
1086 for (i = 0; i < type->num_boxes; i++) {
1087 if (!type->box_ctls[die])
1089 box_ctl = type->box_ctls[die] + type->pci_offsets[i];
1090 if (pdev->devfn == UNCORE_DISCOVERY_PCI_DEVFN(box_ctl) &&
1091 pdev->bus->number == UNCORE_DISCOVERY_PCI_BUS(box_ctl) &&
1092 pci_domain_nr(pdev->bus) == UNCORE_DISCOVERY_PCI_DOMAIN(box_ctl))
1093 return &type->pmus[i];
/*
 * Find the PMU of a PCI device.
 * @pdev: The PCI device.
 * @ids: The ID table of the available PCI devices with a PMU.
 *       If NULL, search the whole uncore_pci_uncores.
 */
1107 static struct intel_uncore_pmu *
1108 uncore_pci_find_dev_pmu(struct pci_dev *pdev, const struct pci_device_id *ids)
1110 struct intel_uncore_pmu *pmu = NULL;
1111 struct intel_uncore_type *type;
1112 kernel_ulong_t data;
1116 return uncore_pci_find_dev_pmu_from_types(pdev);
1118 while (ids && ids->vendor) {
1119 if ((ids->vendor == pdev->vendor) &&
1120 (ids->device == pdev->device)) {
1121 data = ids->driver_data;
1122 devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(data),
1123 UNCORE_PCI_DEV_FUNC(data));
1124 if (devfn == pdev->devfn) {
1125 type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(data)];
1126 pmu = &type->pmus[UNCORE_PCI_DEV_IDX(data)];
/*
 * Register the PMU for a PCI device
 * @pdev: The PCI device.
 * @type: The corresponding PMU type of the device.
 * @pmu: The corresponding PMU of the device.
 * @die: The die id which the device maps to.
 */
1142 static int uncore_pci_pmu_register(struct pci_dev *pdev,
1143 struct intel_uncore_type *type,
1144 struct intel_uncore_pmu *pmu,
1147 struct intel_uncore_box *box;
1150 if (WARN_ON_ONCE(pmu->boxes[die] != NULL))
1153 box = uncore_alloc_box(type, NUMA_NO_NODE);
1157 if (pmu->func_id < 0)
1158 pmu->func_id = pdev->devfn;
1160 WARN_ON_ONCE(pmu->func_id != pdev->devfn);
1162 atomic_inc(&box->refcnt);
1164 box->pci_dev = pdev;
1166 uncore_box_init(box);
1168 pmu->boxes[die] = box;
1169 if (atomic_inc_return(&pmu->activeboxes) > 1)
1172 /* First active box registers the pmu */
1173 ret = uncore_pmu_register(pmu);
1175 pmu->boxes[die] = NULL;
1176 uncore_box_exit(box);
/*
 * add a pci uncore device
 */
1185 static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1187 struct intel_uncore_type *type;
1188 struct intel_uncore_pmu *pmu = NULL;
1191 ret = uncore_pci_get_dev_die_info(pdev, &die);
1195 if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
1196 int idx = UNCORE_PCI_DEV_IDX(id->driver_data);
1198 uncore_extra_pci_dev[die].dev[idx] = pdev;
1199 pci_set_drvdata(pdev, NULL);
1203 type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
	/*
	 * Some platforms, e.g. Knights Landing, use a common PCI device ID
	 * for multiple instances of an uncore PMU device type. We should check
	 * PCI slot and func to indicate the uncore box.
	 */
1210 if (id->driver_data & ~0xffff) {
1211 struct pci_driver *pci_drv = to_pci_driver(pdev->dev.driver);
1213 pmu = uncore_pci_find_dev_pmu(pdev, pci_drv->id_table);
		/*
		 * For a performance monitoring unit with multiple boxes,
		 * each box has a different function id.
		 */
1221 pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
1224 ret = uncore_pci_pmu_register(pdev, type, pmu, die);
1226 pci_set_drvdata(pdev, pmu->boxes[die]);
/*
 * Unregister the PMU of a PCI device
 * @pmu: The corresponding PMU is unregistered.
 * @die: The die id which the device maps to.
 */
1236 static void uncore_pci_pmu_unregister(struct intel_uncore_pmu *pmu, int die)
1238 struct intel_uncore_box *box = pmu->boxes[die];
1240 pmu->boxes[die] = NULL;
1241 if (atomic_dec_return(&pmu->activeboxes) == 0)
1242 uncore_pmu_unregister(pmu);
1243 uncore_box_exit(box);
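/*
 * Box/PMU lifetime: uncore_pci_pmu_register() registers the perf PMU when
 * the first box of a type becomes active and uncore_pci_pmu_unregister()
 * tears it down again once activeboxes drops to zero, so the PMU exists
 * exactly as long as at least one backing PCI device is present.
 */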
1247 static void uncore_pci_remove(struct pci_dev *pdev)
1249 struct intel_uncore_box *box;
1250 struct intel_uncore_pmu *pmu;
1253 if (uncore_pci_get_dev_die_info(pdev, &die))
1256 box = pci_get_drvdata(pdev);
1258 for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
1259 if (uncore_extra_pci_dev[die].dev[i] == pdev) {
1260 uncore_extra_pci_dev[die].dev[i] = NULL;
1264 WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
1270 pci_set_drvdata(pdev, NULL);
1272 uncore_pci_pmu_unregister(pmu, die);
1275 static int uncore_bus_notify(struct notifier_block *nb,
1276 unsigned long action, void *data,
1277 const struct pci_device_id *ids)
1279 struct device *dev = data;
1280 struct pci_dev *pdev = to_pci_dev(dev);
1281 struct intel_uncore_pmu *pmu;
1284 /* Unregister the PMU when the device is going to be deleted. */
1285 if (action != BUS_NOTIFY_DEL_DEVICE)
1288 pmu = uncore_pci_find_dev_pmu(pdev, ids);
1292 if (uncore_pci_get_dev_die_info(pdev, &die))
1295 uncore_pci_pmu_unregister(pmu, die);
1300 static int uncore_pci_sub_bus_notify(struct notifier_block *nb,
1301 unsigned long action, void *data)
1303 return uncore_bus_notify(nb, action, data,
1304 uncore_pci_sub_driver->id_table);
1307 static struct notifier_block uncore_pci_sub_notifier = {
1308 .notifier_call = uncore_pci_sub_bus_notify,
1311 static void uncore_pci_sub_driver_init(void)
1313 const struct pci_device_id *ids = uncore_pci_sub_driver->id_table;
1314 struct intel_uncore_type *type;
1315 struct intel_uncore_pmu *pmu;
1316 struct pci_dev *pci_sub_dev;
1317 bool notify = false;
1321 while (ids && ids->vendor) {
1323 type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(ids->driver_data)];
		/*
		 * Search the available device, and register the
		 * corresponding PMU.
		 */
1328 while ((pci_sub_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
1329 ids->device, pci_sub_dev))) {
1330 devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(ids->driver_data),
1331 UNCORE_PCI_DEV_FUNC(ids->driver_data));
1332 if (devfn != pci_sub_dev->devfn)
1335 pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)];
1339 if (uncore_pci_get_dev_die_info(pci_sub_dev, &die))
1342 if (!uncore_pci_pmu_register(pci_sub_dev, type, pmu,
1349 if (notify && bus_register_notifier(&pci_bus_type, &uncore_pci_sub_notifier))
1353 uncore_pci_sub_driver = NULL;
1356 static int uncore_pci_bus_notify(struct notifier_block *nb,
1357 unsigned long action, void *data)
1359 return uncore_bus_notify(nb, action, data, NULL);
1362 static struct notifier_block uncore_pci_notifier = {
1363 .notifier_call = uncore_pci_bus_notify,
1367 static void uncore_pci_pmus_register(void)
1369 struct intel_uncore_type **types = uncore_pci_uncores;
1370 struct intel_uncore_type *type;
1371 struct intel_uncore_pmu *pmu;
1372 struct pci_dev *pdev;
1376 for (; *types; types++) {
1378 for (die = 0; die < __uncore_max_dies; die++) {
1379 for (i = 0; i < type->num_boxes; i++) {
1380 if (!type->box_ctls[die])
1382 box_ctl = type->box_ctls[die] + type->pci_offsets[i];
1383 pdev = pci_get_domain_bus_and_slot(UNCORE_DISCOVERY_PCI_DOMAIN(box_ctl),
1384 UNCORE_DISCOVERY_PCI_BUS(box_ctl),
1385 UNCORE_DISCOVERY_PCI_DEVFN(box_ctl));
1388 pmu = &type->pmus[i];
1390 uncore_pci_pmu_register(pdev, type, pmu, die);
1395 bus_register_notifier(&pci_bus_type, &uncore_pci_notifier);
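/*
 * Platforms enumerated purely from the discovery table have no PCI driver
 * owning these devices, so hot-removal is caught through the bus notifier
 * registered above, which unregisters the affected PMU on
 * BUS_NOTIFY_DEL_DEVICE (see uncore_bus_notify()).
 */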
1398 static int __init uncore_pci_init(void)
1403 size = uncore_max_dies() * sizeof(struct pci_extra_dev);
1404 uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL);
1405 if (!uncore_extra_pci_dev) {
1410 ret = uncore_types_init(uncore_pci_uncores, false);
1414 if (uncore_pci_driver) {
1415 uncore_pci_driver->probe = uncore_pci_probe;
1416 uncore_pci_driver->remove = uncore_pci_remove;
1418 ret = pci_register_driver(uncore_pci_driver);
1422 uncore_pci_pmus_register();
1424 if (uncore_pci_sub_driver)
1425 uncore_pci_sub_driver_init();
1427 pcidrv_registered = true;
1431 uncore_types_exit(uncore_pci_uncores);
1432 kfree(uncore_extra_pci_dev);
1433 uncore_extra_pci_dev = NULL;
1434 uncore_free_pcibus_map();
1436 uncore_pci_uncores = empty_uncore;
1440 static void uncore_pci_exit(void)
1442 if (pcidrv_registered) {
1443 pcidrv_registered = false;
1444 if (uncore_pci_sub_driver)
1445 bus_unregister_notifier(&pci_bus_type, &uncore_pci_sub_notifier);
1446 if (uncore_pci_driver)
1447 pci_unregister_driver(uncore_pci_driver);
1449 bus_unregister_notifier(&pci_bus_type, &uncore_pci_notifier);
1450 uncore_types_exit(uncore_pci_uncores);
1451 kfree(uncore_extra_pci_dev);
1452 uncore_free_pcibus_map();
1456 static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
1459 struct intel_uncore_pmu *pmu = type->pmus;
1460 struct intel_uncore_box *box;
1463 die = topology_logical_die_id(old_cpu < 0 ? new_cpu : old_cpu);
1464 for (i = 0; i < type->num_boxes; i++, pmu++) {
1465 box = pmu->boxes[die];
1470 WARN_ON_ONCE(box->cpu != -1);
1475 WARN_ON_ONCE(box->cpu != old_cpu);
1480 uncore_pmu_cancel_hrtimer(box);
1481 perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
1486 static void uncore_change_context(struct intel_uncore_type **uncores,
1487 int old_cpu, int new_cpu)
1489 for (; *uncores; uncores++)
1490 uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
1493 static void uncore_box_unref(struct intel_uncore_type **types, int id)
1495 struct intel_uncore_type *type;
1496 struct intel_uncore_pmu *pmu;
1497 struct intel_uncore_box *box;
1500 for (; *types; types++) {
1503 for (i = 0; i < type->num_boxes; i++, pmu++) {
1504 box = pmu->boxes[id];
1505 if (box && atomic_dec_return(&box->refcnt) == 0)
1506 uncore_box_exit(box);
1511 static int uncore_event_cpu_offline(unsigned int cpu)
1515 /* Check if exiting cpu is used for collecting uncore events */
1516 if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
1518 /* Find a new cpu to collect uncore events */
1519 target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
1521 /* Migrate uncore events to the new target */
1522 if (target < nr_cpu_ids)
1523 cpumask_set_cpu(target, &uncore_cpu_mask);
1527 uncore_change_context(uncore_msr_uncores, cpu, target);
1528 uncore_change_context(uncore_mmio_uncores, cpu, target);
1529 uncore_change_context(uncore_pci_uncores, cpu, target);
1532 /* Clear the references */
1533 die = topology_logical_die_id(cpu);
1534 uncore_box_unref(uncore_msr_uncores, die);
1535 uncore_box_unref(uncore_mmio_uncores, die);
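	/*
	 * This offline path mirrors uncore_event_cpu_online(): the event
	 * collecting duty moves to another CPU of the same die if one is
	 * still online, and the per-die box references taken in
	 * uncore_box_ref() are dropped here via uncore_box_unref().
	 */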
1539 static int allocate_boxes(struct intel_uncore_type **types,
1540 unsigned int die, unsigned int cpu)
1542 struct intel_uncore_box *box, *tmp;
1543 struct intel_uncore_type *type;
1544 struct intel_uncore_pmu *pmu;
1545 LIST_HEAD(allocated);
1548 /* Try to allocate all required boxes */
1549 for (; *types; types++) {
1552 for (i = 0; i < type->num_boxes; i++, pmu++) {
1553 if (pmu->boxes[die])
1555 box = uncore_alloc_box(type, cpu_to_node(cpu));
1560 list_add(&box->active_list, &allocated);
1563 /* Install them in the pmus */
1564 list_for_each_entry_safe(box, tmp, &allocated, active_list) {
1565 list_del_init(&box->active_list);
1566 box->pmu->boxes[die] = box;
1571 list_for_each_entry_safe(box, tmp, &allocated, active_list) {
1572 list_del_init(&box->active_list);
1578 static int uncore_box_ref(struct intel_uncore_type **types,
1579 int id, unsigned int cpu)
1581 struct intel_uncore_type *type;
1582 struct intel_uncore_pmu *pmu;
1583 struct intel_uncore_box *box;
1586 ret = allocate_boxes(types, id, cpu);
1590 for (; *types; types++) {
1593 for (i = 0; i < type->num_boxes; i++, pmu++) {
1594 box = pmu->boxes[id];
1595 if (box && atomic_inc_return(&box->refcnt) == 1)
1596 uncore_box_init(box);
1602 static int uncore_event_cpu_online(unsigned int cpu)
1604 int die, target, msr_ret, mmio_ret;
1606 die = topology_logical_die_id(cpu);
1607 msr_ret = uncore_box_ref(uncore_msr_uncores, die, cpu);
1608 mmio_ret = uncore_box_ref(uncore_mmio_uncores, die, cpu);
1609 if (msr_ret && mmio_ret)
	/*
	 * Check if there is an online cpu in the package
	 * which collects uncore events already.
	 */
1616 target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu));
1617 if (target < nr_cpu_ids)
1620 cpumask_set_cpu(cpu, &uncore_cpu_mask);
1623 uncore_change_context(uncore_msr_uncores, -1, cpu);
1625 uncore_change_context(uncore_mmio_uncores, -1, cpu);
1626 uncore_change_context(uncore_pci_uncores, -1, cpu);
1630 static int __init type_pmu_register(struct intel_uncore_type *type)
1634 for (i = 0; i < type->num_boxes; i++) {
1635 ret = uncore_pmu_register(&type->pmus[i]);
1642 static int __init uncore_msr_pmus_register(void)
1644 struct intel_uncore_type **types = uncore_msr_uncores;
1647 for (; *types; types++) {
1648 ret = type_pmu_register(*types);
1655 static int __init uncore_cpu_init(void)
1659 ret = uncore_types_init(uncore_msr_uncores, true);
1663 ret = uncore_msr_pmus_register();
1668 uncore_types_exit(uncore_msr_uncores);
1669 uncore_msr_uncores = empty_uncore;
1673 static int __init uncore_mmio_init(void)
1675 struct intel_uncore_type **types = uncore_mmio_uncores;
1678 ret = uncore_types_init(types, true);
1682 for (; *types; types++) {
1683 ret = type_pmu_register(*types);
1689 uncore_types_exit(uncore_mmio_uncores);
1690 uncore_mmio_uncores = empty_uncore;
1694 struct intel_uncore_init_fun {
1695 void (*cpu_init)(void);
1696 int (*pci_init)(void);
1697 void (*mmio_init)(void);
	/* Discovery table is required */
	bool use_discovery;
	/* The units in the discovery table should be ignored. */
	int *uncore_units_ignore;
};
1704 static const struct intel_uncore_init_fun nhm_uncore_init __initconst = {
1705 .cpu_init = nhm_uncore_cpu_init,
1708 static const struct intel_uncore_init_fun snb_uncore_init __initconst = {
1709 .cpu_init = snb_uncore_cpu_init,
1710 .pci_init = snb_uncore_pci_init,
1713 static const struct intel_uncore_init_fun ivb_uncore_init __initconst = {
1714 .cpu_init = snb_uncore_cpu_init,
1715 .pci_init = ivb_uncore_pci_init,
1718 static const struct intel_uncore_init_fun hsw_uncore_init __initconst = {
1719 .cpu_init = snb_uncore_cpu_init,
1720 .pci_init = hsw_uncore_pci_init,
1723 static const struct intel_uncore_init_fun bdw_uncore_init __initconst = {
1724 .cpu_init = snb_uncore_cpu_init,
1725 .pci_init = bdw_uncore_pci_init,
1728 static const struct intel_uncore_init_fun snbep_uncore_init __initconst = {
1729 .cpu_init = snbep_uncore_cpu_init,
1730 .pci_init = snbep_uncore_pci_init,
1733 static const struct intel_uncore_init_fun nhmex_uncore_init __initconst = {
1734 .cpu_init = nhmex_uncore_cpu_init,
1737 static const struct intel_uncore_init_fun ivbep_uncore_init __initconst = {
1738 .cpu_init = ivbep_uncore_cpu_init,
1739 .pci_init = ivbep_uncore_pci_init,
1742 static const struct intel_uncore_init_fun hswep_uncore_init __initconst = {
1743 .cpu_init = hswep_uncore_cpu_init,
1744 .pci_init = hswep_uncore_pci_init,
1747 static const struct intel_uncore_init_fun bdx_uncore_init __initconst = {
1748 .cpu_init = bdx_uncore_cpu_init,
1749 .pci_init = bdx_uncore_pci_init,
1752 static const struct intel_uncore_init_fun knl_uncore_init __initconst = {
1753 .cpu_init = knl_uncore_cpu_init,
1754 .pci_init = knl_uncore_pci_init,
1757 static const struct intel_uncore_init_fun skl_uncore_init __initconst = {
1758 .cpu_init = skl_uncore_cpu_init,
1759 .pci_init = skl_uncore_pci_init,
1762 static const struct intel_uncore_init_fun skx_uncore_init __initconst = {
1763 .cpu_init = skx_uncore_cpu_init,
1764 .pci_init = skx_uncore_pci_init,
1767 static const struct intel_uncore_init_fun icl_uncore_init __initconst = {
1768 .cpu_init = icl_uncore_cpu_init,
1769 .pci_init = skl_uncore_pci_init,
1772 static const struct intel_uncore_init_fun tgl_uncore_init __initconst = {
1773 .cpu_init = tgl_uncore_cpu_init,
1774 .mmio_init = tgl_uncore_mmio_init,
1777 static const struct intel_uncore_init_fun tgl_l_uncore_init __initconst = {
1778 .cpu_init = tgl_uncore_cpu_init,
1779 .mmio_init = tgl_l_uncore_mmio_init,
1782 static const struct intel_uncore_init_fun rkl_uncore_init __initconst = {
1783 .cpu_init = tgl_uncore_cpu_init,
1784 .pci_init = skl_uncore_pci_init,
1787 static const struct intel_uncore_init_fun adl_uncore_init __initconst = {
1788 .cpu_init = adl_uncore_cpu_init,
1789 .mmio_init = adl_uncore_mmio_init,
1792 static const struct intel_uncore_init_fun mtl_uncore_init __initconst = {
1793 .cpu_init = mtl_uncore_cpu_init,
1794 .mmio_init = adl_uncore_mmio_init,
1797 static const struct intel_uncore_init_fun icx_uncore_init __initconst = {
1798 .cpu_init = icx_uncore_cpu_init,
1799 .pci_init = icx_uncore_pci_init,
1800 .mmio_init = icx_uncore_mmio_init,
1803 static const struct intel_uncore_init_fun snr_uncore_init __initconst = {
1804 .cpu_init = snr_uncore_cpu_init,
1805 .pci_init = snr_uncore_pci_init,
1806 .mmio_init = snr_uncore_mmio_init,
1809 static const struct intel_uncore_init_fun spr_uncore_init __initconst = {
1810 .cpu_init = spr_uncore_cpu_init,
1811 .pci_init = spr_uncore_pci_init,
1812 .mmio_init = spr_uncore_mmio_init,
1813 .use_discovery = true,
1814 .uncore_units_ignore = spr_uncore_units_ignore,
1817 static const struct intel_uncore_init_fun gnr_uncore_init __initconst = {
1818 .cpu_init = gnr_uncore_cpu_init,
1819 .pci_init = gnr_uncore_pci_init,
1820 .mmio_init = gnr_uncore_mmio_init,
1821 .use_discovery = true,
1822 .uncore_units_ignore = gnr_uncore_units_ignore,
1825 static const struct intel_uncore_init_fun generic_uncore_init __initconst = {
1826 .cpu_init = intel_uncore_generic_uncore_cpu_init,
1827 .pci_init = intel_uncore_generic_uncore_pci_init,
1828 .mmio_init = intel_uncore_generic_uncore_mmio_init,
1831 static const struct x86_cpu_id intel_uncore_match[] __initconst = {
1832 X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP, &nhm_uncore_init),
1833 X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &nhm_uncore_init),
1834 X86_MATCH_INTEL_FAM6_MODEL(WESTMERE, &nhm_uncore_init),
1835 X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EP, &nhm_uncore_init),
1836 X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE, &snb_uncore_init),
1837 X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE, &ivb_uncore_init),
1838 X86_MATCH_INTEL_FAM6_MODEL(HASWELL, &hsw_uncore_init),
1839 X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L, &hsw_uncore_init),
1840 X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G, &hsw_uncore_init),
1841 X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, &bdw_uncore_init),
1842 X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, &bdw_uncore_init),
1843 X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &snbep_uncore_init),
1844 X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EX, &nhmex_uncore_init),
1845 X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EX, &nhmex_uncore_init),
1846 X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &ivbep_uncore_init),
1847 X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &hswep_uncore_init),
1848 X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &bdx_uncore_init),
1849 X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &bdx_uncore_init),
1850 X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &knl_uncore_init),
1851 X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &knl_uncore_init),
1852 X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &skl_uncore_init),
1853 X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &skl_uncore_init),
1854 X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &skx_uncore_init),
1855 X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &skl_uncore_init),
1856 X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &skl_uncore_init),
1857 X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &skl_uncore_init),
1858 X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &skl_uncore_init),
1859 X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &icl_uncore_init),
1860 X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_NNPI, &icl_uncore_init),
1861 X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, &icl_uncore_init),
1862 X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &icx_uncore_init),
1863 X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &icx_uncore_init),
1864 X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &tgl_l_uncore_init),
1865 X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &tgl_uncore_init),
1866 X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &rkl_uncore_init),
1867 X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &adl_uncore_init),
1868 X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &adl_uncore_init),
1869 X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &adl_uncore_init),
1870 X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &adl_uncore_init),
1871 X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &adl_uncore_init),
1872 X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, &mtl_uncore_init),
1873 X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &mtl_uncore_init),
1874 X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &spr_uncore_init),
1875 X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &spr_uncore_init),
1876 X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, &gnr_uncore_init),
1877 X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D, &gnr_uncore_init),
1878 X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &snr_uncore_init),
1879 X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &adl_uncore_init),
1880 X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, &gnr_uncore_init),
1881 X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, &gnr_uncore_init),
1884 MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);
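/*
 * Model selection: intel_uncore_init() first consults the match table above;
 * when the CPU is unknown but exposes discovery tables (and
 * uncore_no_discover is clear), it falls back to generic_uncore_init, which
 * builds the uncore types purely from the discovered units.
 */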
1886 static int __init intel_uncore_init(void)
1888 const struct x86_cpu_id *id;
1889 struct intel_uncore_init_fun *uncore_init;
1890 int pret = 0, cret = 0, mret = 0, ret;
1892 if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
1896 topology_max_packages() * topology_max_dies_per_package();
1898 id = x86_match_cpu(intel_uncore_match);
1900 if (!uncore_no_discover && intel_uncore_has_discovery_tables(NULL))
1901 uncore_init = (struct intel_uncore_init_fun *)&generic_uncore_init;
1905 uncore_init = (struct intel_uncore_init_fun *)id->driver_data;
1906 if (uncore_no_discover && uncore_init->use_discovery)
1908 if (uncore_init->use_discovery &&
1909 !intel_uncore_has_discovery_tables(uncore_init->uncore_units_ignore))
1913 if (uncore_init->pci_init) {
1914 pret = uncore_init->pci_init();
1916 pret = uncore_pci_init();
1919 if (uncore_init->cpu_init) {
1920 uncore_init->cpu_init();
1921 cret = uncore_cpu_init();
1924 if (uncore_init->mmio_init) {
1925 uncore_init->mmio_init();
1926 mret = uncore_mmio_init();
1929 if (cret && pret && mret) {
1931 goto free_discovery;
1934 /* Install hotplug callbacks to setup the targets for each package */
1935 ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
1936 "perf/x86/intel/uncore:online",
1937 uncore_event_cpu_online,
1938 uncore_event_cpu_offline);
1944 uncore_types_exit(uncore_msr_uncores);
1945 uncore_types_exit(uncore_mmio_uncores);
1948 intel_uncore_clear_discovery_tables();
1951 module_init(intel_uncore_init);
1953 static void __exit intel_uncore_exit(void)
1955 cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
1956 uncore_types_exit(uncore_msr_uncores);
1957 uncore_types_exit(uncore_mmio_uncores);
1959 intel_uncore_clear_discovery_tables();
1961 module_exit(intel_uncore_exit);