// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "uncore.h"

static struct intel_uncore_type *empty_uncore[] = { NULL, };
struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
struct intel_uncore_type **uncore_pci_uncores = empty_uncore;

static bool pcidrv_registered;
struct pci_driver *uncore_pci_driver;
/* pci bus to socket mapping */
DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
struct pci_extra_dev *uncore_extra_pci_dev;
static int max_dies;

/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;

/* constraint for the fixed counter */
static struct event_constraint uncore_constraint_fixed =
	EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
struct event_constraint uncore_constraint_empty =
	EVENT_CONSTRAINT(0, 0, 0);

MODULE_LICENSE("GPL");

static int uncore_pcibus_to_physid(struct pci_bus *bus)
{
	struct pci2phy_map *map;
	int phys_id = -1;

	raw_spin_lock(&pci2phy_map_lock);
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == pci_domain_nr(bus)) {
			phys_id = map->pbus_to_physid[bus->number];
			break;
		}
	}
	raw_spin_unlock(&pci2phy_map_lock);

	return phys_id;
}

static void uncore_free_pcibus_map(void)
{
	struct pci2phy_map *map, *tmp;

	list_for_each_entry_safe(map, tmp, &pci2phy_map_head, list) {
		list_del(&map->list);
		kfree(map);
	}
}

struct pci2phy_map *__find_pci2phy_map(int segment)
{
	struct pci2phy_map *map, *alloc = NULL;
	int i;

	lockdep_assert_held(&pci2phy_map_lock);

lookup:
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == segment)
			goto end;
	}

	if (!alloc) {
		raw_spin_unlock(&pci2phy_map_lock);
		alloc = kmalloc(sizeof(struct pci2phy_map), GFP_KERNEL);
		raw_spin_lock(&pci2phy_map_lock);

		if (!alloc)
			return NULL;

		goto lookup;
	}

	map = alloc;
	alloc = NULL;
	map->segment = segment;
	for (i = 0; i < 256; i++)
		map->pbus_to_physid[i] = -1;
	list_add_tail(&map->list, &pci2phy_map_head);

end:
	kfree(alloc);
	return map;
}

ssize_t uncore_event_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	struct uncore_event_desc *event =
		container_of(attr, struct uncore_event_desc, attr);
	return sprintf(buf, "%s", event->config);
}

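/*
 * Map a (pmu, cpu) pair to the box that serves the CPU's die, using the
 * logical die id as the index into the pmu's per-die box array.
 */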
struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
	unsigned int dieid = topology_logical_die_id(cpu);

	/*
	 * The unsigned check also catches the '-1' return value for non
	 * existent mappings in the topology map.
	 */
	return dieid < max_dies ? pmu->boxes[dieid] : NULL;
}

u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 count;

	rdmsrl(event->hw.event_base, count);

	return count;
}

/*
 * generic get constraint function for shared match/mask registers.
 */
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	unsigned long flags;
	bool ok = false;

	/*
	 * reg->alloc can be set due to existing state, so for fake box we
	 * need to ignore this, otherwise we might fail to allocate proper
	 * fake state for this extra reg constraint.
	 */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;

	er = &box->shared_regs[reg1->idx];
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!atomic_read(&er->ref) ||
	    (er->config1 == reg1->config && er->config2 == reg2->config)) {
		atomic_inc(&er->ref);
		er->config1 = reg1->config;
		er->config2 = reg2->config;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (ok) {
		if (!uncore_box_is_fake(box))
			reg1->alloc = 1;
		return NULL;
	}

	return &uncore_constraint_empty;
}

void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

	/*
	 * Only put the constraint if the extra reg was actually allocated.
	 * This also takes care of events that do not use an extra shared reg.
	 *
	 * Also, if this is a fake box we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent box
	 * state either since it will be thrown out.
	 */
	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	er = &box->shared_regs[reg1->idx];
	atomic_dec(&er->ref);
	reg1->alloc = 0;
}

u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	u64 config;

	er = &box->shared_regs[idx];

	raw_spin_lock_irqsave(&er->lock, flags);
	config = er->config;
	raw_spin_unlock_irqrestore(&er->lock, flags);

	return config;
}

static void uncore_assign_hw_event(struct intel_uncore_box *box,
				   struct perf_event *event, int idx)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = idx;
	hwc->last_tag = ++box->tags[idx];

	if (uncore_pmc_fixed(hwc->idx)) {
		hwc->event_base = uncore_fixed_ctr(box);
		hwc->config_base = uncore_fixed_ctl(box);
		return;
	}

	hwc->config_base = uncore_event_ctl(box, hwc->idx);
	hwc->event_base = uncore_perf_ctr(box, hwc->idx);
}

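/*
 * Uncore counters are narrower than 64 bits and the free running, fixed
 * and generic widths all differ, so both samples are shifted up to bit 63
 * before the delta is taken and shifted back down afterwards. This masks
 * out the unused high bits and makes counter wrap-around come out right.
 */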
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 prev_count, new_count, delta;
	int shift;

	if (uncore_pmc_freerunning(event->hw.idx))
		shift = 64 - uncore_freerunning_bits(box, event);
	else if (uncore_pmc_fixed(event->hw.idx))
		shift = 64 - uncore_fixed_ctr_bits(box);
	else
		shift = 64 - uncore_perf_ctr_bits(box);

	/* the hrtimer might modify the previous event value */
again:
	prev_count = local64_read(&event->hw.prev_count);
	new_count = uncore_read_counter(box, event);
	if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
		goto again;

	delta = (new_count << shift) - (prev_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}

/*
 * The overflow interrupt is unavailable for SandyBridge-EP and is broken
 * for SandyBridge, so we use an hrtimer to periodically poll the counters
 * and avoid overflow.
 */
static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
{
	struct intel_uncore_box *box;
	struct perf_event *event;
	unsigned long flags;
	int bit;

	box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
	if (!box->n_active || box->cpu != smp_processor_id())
		return HRTIMER_NORESTART;
	/*
	 * disable local interrupts to prevent uncore_pmu_event_start/stop
	 * from interrupting the update process
	 */
	local_irq_save(flags);

	/*
	 * handle boxes with an active event list as opposed to active
	 * counters
	 */
	list_for_each_entry(event, &box->active_list, active_entry) {
		uncore_perf_event_update(box, event);
	}

	for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
		uncore_perf_event_update(box, box->events[bit]);

	local_irq_restore(flags);

	hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
	return HRTIMER_RESTART;
}

void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
		      HRTIMER_MODE_REL_PINNED);
}

void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_cancel(&box->hrtimer);
}

static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	box->hrtimer.function = uncore_pmu_hrtimer;
}

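/*
 * Allocate a box together with its trailing array of shared extra
 * registers on the requested NUMA node and initialize it to safe defaults
 * (no owning CPU, no PCI physical id, default hrtimer polling interval).
 */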
static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
						 int node)
{
	int i, size, numshared = type->num_shared_regs;
	struct intel_uncore_box *box;

	size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg);

	box = kzalloc_node(size, GFP_KERNEL, node);
	if (!box)
		return NULL;

	for (i = 0; i < numshared; i++)
		raw_spin_lock_init(&box->shared_regs[i].lock);

	uncore_pmu_init_hrtimer(box);
	box->cpu = -1;
	box->pci_phys_id = -1;
	box->dieid = -1;

	/* set default hrtimer timeout */
	box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;

	INIT_LIST_HEAD(&box->active_list);

	return box;
}

/*
 * Using the uncore_pmu_event_init pmu event_init callback
 * as a detection point for uncore events.
 */
static int uncore_pmu_event_init(struct perf_event *event);

static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event)
{
	return &box->pmu->pmu == event->pmu;
}

static int
uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
		      bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = box->pmu->type->num_counters;
	if (box->pmu->type->fixed_ctl)
		max_count++;

	if (box->n_events >= max_count)
		return -EINVAL;

	n = box->n_events;

	if (is_box_event(box, leader)) {
		box->event_list[n] = leader;
		n++;
	}

	if (!dogrp)
		return n;

	for_each_sibling_event(event, leader) {
		if (!is_box_event(box, event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -EINVAL;

		box->event_list[n] = event;
		n++;
	}
	return n;
}

static struct event_constraint *
uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct event_constraint *c;

	if (type->ops->get_constraint) {
		c = type->ops->get_constraint(box, event);
		if (c)
			return c;
	}

	if (event->attr.config == UNCORE_FIXED_EVENT)
		return &uncore_constraint_fixed;

	if (type->constraints) {
		for_each_event_constraint(c, type->constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &type->unconstrainted;
}

static void uncore_put_event_constraint(struct intel_uncore_box *box,
					struct perf_event *event)
{
	if (box->pmu->type->ops->put_constraint)
		box->pmu->type->ops->put_constraint(box, event);
}

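/*
 * Schedule the events in event_list onto counters: first try the fast
 * path that keeps every event on its previously assigned counter, then
 * fall back to perf_assign_events() for a full constraint-aware
 * reassignment when that is not possible.
 */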
static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
{
	unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	struct event_constraint *c;
	int i, wmin, wmax, ret = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);

	for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
		c = uncore_get_event_constraint(box, box->event_list[i]);
		box->event_constraint[i] = c;
		wmin = min(wmin, c->weight);
		wmax = max(wmax, c->weight);
	}

	/* fastpath, try to reuse previous register */
	for (i = 0; i < n; i++) {
		hwc = &box->event_list[i]->hw;
		c = box->event_constraint[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	/* slow path */
	if (i != n)
		ret = perf_assign_events(box->event_constraint, n,
					 wmin, wmax, n, assign);

	if (!assign || ret) {
		for (i = 0; i < n; i++)
			uncore_put_event_constraint(box, box->event_list[i]);
	}
	return ret ? -EINVAL : 0;
}

void uncore_pmu_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int idx = event->hw.idx;

	if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
		return;

	/*
	 * Free running counter is read-only and always active.
	 * Use the current counter value as start point.
	 * There is no overflow interrupt for free running counter.
	 * Use hrtimer to periodically poll the counter to avoid overflow.
	 */
	if (uncore_pmc_freerunning(event->hw.idx)) {
		list_add_tail(&event->active_entry, &box->active_list);
		local64_set(&event->hw.prev_count,
			    uncore_read_counter(box, event));
		if (box->n_active++ == 0)
			uncore_pmu_start_hrtimer(box);
		return;
	}

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	event->hw.state = 0;
	box->events[idx] = event;
	box->n_active++;
	__set_bit(idx, box->active_mask);

	local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
	uncore_enable_event(box, event);

	if (box->n_active == 1) {
		uncore_enable_box(box);
		uncore_pmu_start_hrtimer(box);
	}
}

void uncore_pmu_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	/* Cannot disable a free running counter which is read-only */
	if (uncore_pmc_freerunning(hwc->idx)) {
		list_del(&event->active_entry);
		if (--box->n_active == 0)
			uncore_pmu_cancel_hrtimer(box);
		uncore_perf_event_update(box, event);
		return;
	}

	if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
		uncore_disable_event(box, event);
		box->n_active--;
		box->events[hwc->idx] = NULL;
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		if (box->n_active == 0) {
			uncore_disable_box(box);
			uncore_pmu_cancel_hrtimer(box);
		}
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

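/*
 * Add an event to the box: collect it into event_list, compute a counter
 * assignment, stop events whose counter changed, reprogram them on their
 * new counters and start whatever is allowed to run.
 */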
int uncore_pmu_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;
	int assign[UNCORE_PMC_IDX_MAX];
	int i, n, ret;

	if (!box)
		return -ENODEV;

	/*
	 * The free running counter is assigned in event_init().
	 * The free running counter event and free running counter
	 * are 1:1 mapped. It doesn't need to be tracked in event_list.
	 */
	if (uncore_pmc_freerunning(hwc->idx)) {
		if (flags & PERF_EF_START)
			uncore_pmu_event_start(event, 0);
		return 0;
	}

	ret = n = uncore_collect_events(box, event, false);
	if (ret < 0)
		return ret;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	ret = uncore_assign_events(box, assign, n);
	if (ret)
		return ret;

	/* save events moving to new counters */
	for (i = 0; i < box->n_events; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx == assign[i] &&
		    hwc->last_tag == box->tags[assign[i]])
			continue;
		/*
		 * Ensure we don't accidentally enable a stopped
		 * counter simply because we rescheduled.
		 */
		if (hwc->state & PERF_HES_STOPPED)
			hwc->state |= PERF_HES_ARCH;

		uncore_pmu_event_stop(event, PERF_EF_UPDATE);
	}

	/* reprogram moved events into new counters */
	for (i = 0; i < n; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx != assign[i] ||
		    hwc->last_tag != box->tags[assign[i]])
			uncore_assign_hw_event(box, event, assign[i]);
		else if (i < box->n_events)
			continue;

		if (hwc->state & PERF_HES_ARCH)
			continue;

		uncore_pmu_event_start(event, 0);
	}
	box->n_events = n;

	return 0;
}

void uncore_pmu_event_del(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int i;

	uncore_pmu_event_stop(event, PERF_EF_UPDATE);

	/*
	 * The event for a free running counter is not tracked by event_list.
	 * It doesn't need to force event->hw.idx = -1 to reassign the counter,
	 * because the event and the free running counter are 1:1 mapped.
	 */
	if (uncore_pmc_freerunning(event->hw.idx))
		return;

	for (i = 0; i < box->n_events; i++) {
		if (event == box->event_list[i]) {
			uncore_put_event_constraint(box, event);

			for (++i; i < box->n_events; i++)
				box->event_list[i - 1] = box->event_list[i];

			--box->n_events;
			break;
		}
	}

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
}

void uncore_pmu_event_read(struct perf_event *event)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);

	uncore_perf_event_update(box, event);
}

/*
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int uncore_validate_group(struct intel_uncore_pmu *pmu,
				 struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct intel_uncore_box *fake_box;
	int ret = -EINVAL, n;

	/* The free running counter is always active. */
	if (uncore_pmc_freerunning(event->hw.idx))
		return 0;

	fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
	if (!fake_box)
		return -ENOMEM;

	fake_box->pmu = pmu;
	/*
	 * the event is not yet connected with its
	 * siblings therefore we must first collect
	 * existing siblings, then add the new event
	 * before we can simulate the scheduling
	 */
	n = uncore_collect_events(fake_box, leader, true);
	if (n < 0)
		goto out;

	fake_box->n_events = n;
	n = uncore_collect_events(fake_box, event, false);
	if (n < 0)
		goto out;

	fake_box->n_events = n;

	ret = uncore_assign_events(fake_box, NULL, n);
out:
	kfree(fake_box);
	return ret;
}

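/*
 * perf core event_init callback: validate the event attributes, bind the
 * event to the box that serves its die and precompute the hardware config.
 */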
static int uncore_pmu_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	int ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;
	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;
	event->cpu = box->cpu;
	event->pmu_private = box;

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	if (event->attr.config == UNCORE_FIXED_EVENT) {
		/* no fixed counter */
		if (!pmu->type->fixed_ctl)
			return -EINVAL;
		/*
		 * if there is only one fixed counter, only the first pmu
		 * can access the fixed counter
		 */
		if (pmu->type->single_fixed && pmu->pmu_idx > 0)
			return -EINVAL;

		/* fixed counters have the event field hardcoded to zero */
		hwc->config = 0ULL;
	} else if (is_freerunning_event(event)) {
		hwc->config = event->attr.config;
		if (!check_valid_freerunning_event(box, event))
			return -EINVAL;
		event->hw.idx = UNCORE_PMC_IDX_FREERUNNING;
		/*
		 * The free running counter event and free running counter
		 * are always 1:1 mapped.
		 * The free running counter is always active.
		 * Assign the free running counter here.
		 */
		event->hw.event_base = uncore_freerunning_counter(box, event);
	} else {
		hwc->config = event->attr.config &
			      (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32));
		if (pmu->type->ops->hw_config) {
			ret = pmu->type->ops->hw_config(box, event);
			if (ret)
				return ret;
		}
	}

	if (event->group_leader != event)
		ret = uncore_validate_group(pmu, event);
	else
		ret = 0;

	return ret;
}

static ssize_t uncore_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);

static struct attribute *uncore_pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group uncore_pmu_attr_group = {
	.attrs = uncore_pmu_attrs,
};

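/*
 * Register one logical PMU with the perf core. The name is derived from
 * the uncore type name and, for types with several boxes, the box index.
 */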
static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
{
	int ret;

	if (!pmu->type->pmu) {
		pmu->pmu = (struct pmu) {
			.attr_groups	= pmu->type->attr_groups,
			.task_ctx_nr	= perf_invalid_context,
			.event_init	= uncore_pmu_event_init,
			.add		= uncore_pmu_event_add,
			.del		= uncore_pmu_event_del,
			.start		= uncore_pmu_event_start,
			.stop		= uncore_pmu_event_stop,
			.read		= uncore_pmu_event_read,
			.module		= THIS_MODULE,
			.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
		};
	} else {
		pmu->pmu = *pmu->type->pmu;
		pmu->pmu.attr_groups = pmu->type->attr_groups;
	}

	if (pmu->type->num_boxes == 1) {
		if (strlen(pmu->type->name) > 0)
			sprintf(pmu->name, "uncore_%s", pmu->type->name);
		else
			sprintf(pmu->name, "uncore");
	} else {
		sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
			pmu->pmu_idx);
	}

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (!ret)
		pmu->registered = true;
	return ret;
}

static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
{
	if (!pmu->registered)
		return;
	perf_pmu_unregister(&pmu->pmu);
	pmu->registered = false;
}

static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
{
	int die;

	for (die = 0; die < max_dies; die++)
		kfree(pmu->boxes[die]);
	kfree(pmu->boxes);
}

static void uncore_type_exit(struct intel_uncore_type *type)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	int i;

	if (pmu) {
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			uncore_pmu_unregister(pmu);
			uncore_free_boxes(pmu);
		}
		kfree(type->pmus);
		type->pmus = NULL;
	}
	kfree(type->events_group);
	type->events_group = NULL;
}

static void uncore_types_exit(struct intel_uncore_type **types)
{
	for (; *types; types++)
		uncore_type_exit(*types);
}

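/*
 * Allocate the per-type pmu array and per-die box pointers, build the
 * "events" attribute group from the type's event descriptions and set up
 * the default (unconstrained) event constraint.
 */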
static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
{
	struct intel_uncore_pmu *pmus;
	size_t size;
	int i, j;

	pmus = kcalloc(type->num_boxes, sizeof(*pmus), GFP_KERNEL);
	if (!pmus)
		return -ENOMEM;

	size = max_dies * sizeof(struct intel_uncore_box *);

	for (i = 0; i < type->num_boxes; i++) {
		pmus[i].func_id = setid ? i : -1;
		pmus[i].pmu_idx = i;
		pmus[i].type = type;
		pmus[i].boxes = kzalloc(size, GFP_KERNEL);
		if (!pmus[i].boxes)
			goto err;
	}

	type->pmus = pmus;
	type->unconstrainted = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
				   0, type->num_counters, 0, 0);

	if (type->event_descs) {
		struct {
			struct attribute_group group;
			struct attribute *attrs[];
		} *attr_group;

		for (i = 0; type->event_descs[i].attr.attr.name; i++);

		attr_group = kzalloc(struct_size(attr_group, attrs, i + 1),
				     GFP_KERNEL);
		if (!attr_group)
			goto err;

		attr_group->group.name = "events";
		attr_group->group.attrs = attr_group->attrs;

		for (j = 0; j < i; j++)
			attr_group->attrs[j] = &type->event_descs[j].attr.attr;

		type->events_group = &attr_group->group;
	}

	type->pmu_group = &uncore_pmu_attr_group;

	return 0;

err:
	for (i = 0; i < type->num_boxes; i++)
		kfree(pmus[i].boxes);
	kfree(pmus);

	return -ENOMEM;
}

static int __init
uncore_types_init(struct intel_uncore_type **types, bool setid)
{
	int ret;

	for (; *types; types++) {
		ret = uncore_type_init(*types, setid);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * add a pci uncore device
 */
static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu = NULL;
	struct intel_uncore_box *box;
	int phys_id, die, ret;

	phys_id = uncore_pcibus_to_physid(pdev->bus);
	if (phys_id < 0)
		return -ENODEV;

	die = (topology_max_die_per_package() > 1) ? phys_id :
					topology_phys_to_logical_pkg(phys_id);
	if (die < 0)
		return -EINVAL;

	if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
		int idx = UNCORE_PCI_DEV_IDX(id->driver_data);

		uncore_extra_pci_dev[die].dev[idx] = pdev;
		pci_set_drvdata(pdev, NULL);
		return 0;
	}

	type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];

	/*
	 * Some platforms, e.g. Knights Landing, use a common PCI device ID
	 * for multiple instances of an uncore PMU device type. We should check
	 * the PCI slot and function to identify the uncore box.
	 */
	if (id->driver_data & ~0xffff) {
		struct pci_driver *pci_drv = pdev->driver;
		const struct pci_device_id *ids = pci_drv->id_table;
		unsigned int devfn;

		while (ids && ids->vendor) {
			if ((ids->vendor == pdev->vendor) &&
			    (ids->device == pdev->device)) {
				devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(ids->driver_data),
						  UNCORE_PCI_DEV_FUNC(ids->driver_data));
				if (devfn == pdev->devfn) {
					pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)];
					break;
				}
			}
			ids++;
		}
		if (pmu == NULL)
			return -ENODEV;
	} else {
		/*
		 * for a performance monitoring unit with multiple boxes,
		 * each box has a different function id.
		 */
		pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
	}

	if (WARN_ON_ONCE(pmu->boxes[die] != NULL))
		return -EINVAL;

	box = uncore_alloc_box(type, NUMA_NO_NODE);
	if (!box)
		return -ENOMEM;

	if (pmu->func_id < 0)
		pmu->func_id = pdev->devfn;
	else
		WARN_ON_ONCE(pmu->func_id != pdev->devfn);

	atomic_inc(&box->refcnt);
	box->pci_phys_id = phys_id;
	box->dieid = die;
	box->pci_dev = pdev;
	box->pmu = pmu;
	uncore_box_init(box);
	pci_set_drvdata(pdev, box);

	pmu->boxes[die] = box;
	if (atomic_inc_return(&pmu->activeboxes) > 1)
		return 0;

	/* First active box registers the pmu */
	ret = uncore_pmu_register(pmu);
	if (ret) {
		pci_set_drvdata(pdev, NULL);
		pmu->boxes[die] = NULL;
		uncore_box_exit(box);
		kfree(box);
	}
	return ret;
}

static void uncore_pci_remove(struct pci_dev *pdev)
{
	struct intel_uncore_box *box;
	struct intel_uncore_pmu *pmu;
	int i, phys_id, die;

	phys_id = uncore_pcibus_to_physid(pdev->bus);

	box = pci_get_drvdata(pdev);
	if (!box) {
		die = (topology_max_die_per_package() > 1) ? phys_id :
					topology_phys_to_logical_pkg(phys_id);
		for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
			if (uncore_extra_pci_dev[die].dev[i] == pdev) {
				uncore_extra_pci_dev[die].dev[i] = NULL;
				break;
			}
		}
		WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
		return;
	}

	pmu = box->pmu;
	if (WARN_ON_ONCE(phys_id != box->pci_phys_id))
		return;

	pci_set_drvdata(pdev, NULL);
	pmu->boxes[box->dieid] = NULL;
	if (atomic_dec_return(&pmu->activeboxes) == 0)
		uncore_pmu_unregister(pmu);
	uncore_box_exit(box);
	kfree(box);
}

static int __init uncore_pci_init(void)
{
	size_t size;
	int ret;

	size = max_dies * sizeof(struct pci_extra_dev);
	uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL);
	if (!uncore_extra_pci_dev) {
		ret = -ENOMEM;
		goto err;
	}

	ret = uncore_types_init(uncore_pci_uncores, false);
	if (ret)
		goto errtype;

	uncore_pci_driver->probe = uncore_pci_probe;
	uncore_pci_driver->remove = uncore_pci_remove;

	ret = pci_register_driver(uncore_pci_driver);
	if (ret)
		goto errtype;

	pcidrv_registered = true;
	return 0;

errtype:
	uncore_types_exit(uncore_pci_uncores);
	kfree(uncore_extra_pci_dev);
	uncore_extra_pci_dev = NULL;
	uncore_free_pcibus_map();
err:
	uncore_pci_uncores = empty_uncore;
	return ret;
}

static void uncore_pci_exit(void)
{
	if (pcidrv_registered) {
		pcidrv_registered = false;
		pci_unregister_driver(uncore_pci_driver);
		uncore_types_exit(uncore_pci_uncores);
		kfree(uncore_extra_pci_dev);
		uncore_free_pcibus_map();
	}
}

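/*
 * Move the counting context of every box of one uncore type from old_cpu
 * to new_cpu when the designated reader CPU of a die changes (hotplug).
 * old_cpu < 0 means the boxes had no owner yet; new_cpu < 0 parks them.
 */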
static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
				   int new_cpu)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	struct intel_uncore_box *box;
	int i, die;

	die = topology_logical_die_id(old_cpu < 0 ? new_cpu : old_cpu);
	for (i = 0; i < type->num_boxes; i++, pmu++) {
		box = pmu->boxes[die];
		if (!box)
			continue;

		if (old_cpu < 0) {
			WARN_ON_ONCE(box->cpu != -1);
			box->cpu = new_cpu;
			continue;
		}

		WARN_ON_ONCE(box->cpu != old_cpu);
		box->cpu = -1;
		if (new_cpu < 0)
			continue;

		uncore_pmu_cancel_hrtimer(box);
		perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
		box->cpu = new_cpu;
	}
}

static void uncore_change_context(struct intel_uncore_type **uncores,
				  int old_cpu, int new_cpu)
{
	for (; *uncores; uncores++)
		uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
}

static int uncore_event_cpu_offline(unsigned int cpu)
{
	struct intel_uncore_type *type, **types = uncore_msr_uncores;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, die, target;

	/* Check if the exiting cpu is used for collecting uncore events */
	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
		goto unref;
	/* Find a new cpu to collect uncore events */
	target = cpumask_any_but(topology_die_cpumask(cpu), cpu);

	/* Migrate uncore events to the new target */
	if (target < nr_cpu_ids)
		cpumask_set_cpu(target, &uncore_cpu_mask);
	else
		target = -1;

	uncore_change_context(uncore_msr_uncores, cpu, target);
	uncore_change_context(uncore_pci_uncores, cpu, target);

unref:
	/* Clear the references */
	die = topology_logical_die_id(cpu);
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[die];
			if (box && atomic_dec_return(&box->refcnt) == 0)
				uncore_box_exit(box);
		}
	}
	return 0;
}

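/*
 * Pre-allocate the boxes a newly onlined CPU's die is still missing. The
 * boxes are collected on a temporary list first so that a partial
 * allocation failure can be unwound without leaving half-installed boxes.
 */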
static int allocate_boxes(struct intel_uncore_type **types,
			  unsigned int die, unsigned int cpu)
{
	struct intel_uncore_box *box, *tmp;
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	LIST_HEAD(allocated);
	int i;

	/* Try to allocate all required boxes */
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			if (pmu->boxes[die])
				continue;
			box = uncore_alloc_box(type, cpu_to_node(cpu));
			if (!box)
				goto cleanup;
			box->pmu = pmu;
			box->dieid = die;
			list_add(&box->active_list, &allocated);
		}
	}
	/* Install them in the pmus */
	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
		list_del_init(&box->active_list);
		box->pmu->boxes[die] = box;
	}
	return 0;

cleanup:
	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
		list_del_init(&box->active_list);
		kfree(box);
	}
	return -ENOMEM;
}

static int uncore_event_cpu_online(unsigned int cpu)
{
	struct intel_uncore_type *type, **types = uncore_msr_uncores;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, ret, die, target;

	die = topology_logical_die_id(cpu);
	ret = allocate_boxes(types, die, cpu);
	if (ret)
		return ret;

	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[die];
			if (box && atomic_inc_return(&box->refcnt) == 1)
				uncore_box_init(box);
		}
	}

	/*
	 * Check if there is an online cpu in the package
	 * which collects uncore events already.
	 */
	target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu));
	if (target < nr_cpu_ids)
		return 0;

	cpumask_set_cpu(cpu, &uncore_cpu_mask);

	uncore_change_context(uncore_msr_uncores, -1, cpu);
	uncore_change_context(uncore_pci_uncores, -1, cpu);
	return 0;
}

static int __init type_pmu_register(struct intel_uncore_type *type)
{
	int i, ret;

	for (i = 0; i < type->num_boxes; i++) {
		ret = uncore_pmu_register(&type->pmus[i]);
		if (ret)
			return ret;
	}
	return 0;
}

static int __init uncore_msr_pmus_register(void)
{
	struct intel_uncore_type **types = uncore_msr_uncores;
	int ret;

	for (; *types; types++) {
		ret = type_pmu_register(*types);
		if (ret)
			return ret;
	}
	return 0;
}

static int __init uncore_cpu_init(void)
{
	int ret;

	ret = uncore_types_init(uncore_msr_uncores, true);
	if (ret)
		goto err;

	ret = uncore_msr_pmus_register();
	if (ret)
		goto err;
	return 0;
err:
	uncore_types_exit(uncore_msr_uncores);
	uncore_msr_uncores = empty_uncore;
	return ret;
}

#define X86_UNCORE_MODEL_MATCH(model, init)	\
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }

struct intel_uncore_init_fun {
	void	(*cpu_init)(void);
	int	(*pci_init)(void);
};

static const struct intel_uncore_init_fun nhm_uncore_init __initconst = {
	.cpu_init = nhm_uncore_cpu_init,
};

static const struct intel_uncore_init_fun snb_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = snb_uncore_pci_init,
};

static const struct intel_uncore_init_fun ivb_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = ivb_uncore_pci_init,
};

static const struct intel_uncore_init_fun hsw_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = hsw_uncore_pci_init,
};

static const struct intel_uncore_init_fun bdw_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = bdw_uncore_pci_init,
};

static const struct intel_uncore_init_fun snbep_uncore_init __initconst = {
	.cpu_init = snbep_uncore_cpu_init,
	.pci_init = snbep_uncore_pci_init,
};

static const struct intel_uncore_init_fun nhmex_uncore_init __initconst = {
	.cpu_init = nhmex_uncore_cpu_init,
};

static const struct intel_uncore_init_fun ivbep_uncore_init __initconst = {
	.cpu_init = ivbep_uncore_cpu_init,
	.pci_init = ivbep_uncore_pci_init,
};

static const struct intel_uncore_init_fun hswep_uncore_init __initconst = {
	.cpu_init = hswep_uncore_cpu_init,
	.pci_init = hswep_uncore_pci_init,
};

static const struct intel_uncore_init_fun bdx_uncore_init __initconst = {
	.cpu_init = bdx_uncore_cpu_init,
	.pci_init = bdx_uncore_pci_init,
};

static const struct intel_uncore_init_fun knl_uncore_init __initconst = {
	.cpu_init = knl_uncore_cpu_init,
	.pci_init = knl_uncore_pci_init,
};

static const struct intel_uncore_init_fun skl_uncore_init __initconst = {
	.cpu_init = skl_uncore_cpu_init,
	.pci_init = skl_uncore_pci_init,
};

static const struct intel_uncore_init_fun skx_uncore_init __initconst = {
	.cpu_init = skx_uncore_cpu_init,
	.pci_init = skx_uncore_pci_init,
};

static const struct intel_uncore_init_fun icl_uncore_init __initconst = {
	.cpu_init = icl_uncore_cpu_init,
	.pci_init = skl_uncore_pci_init,
};

static const struct x86_cpu_id intel_uncore_match[] __initconst = {
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EP, nhm_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM, nhm_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE, nhm_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EP, nhm_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE, snb_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE, ivb_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE, hsw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT, hsw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E, hsw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE, bdw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, bdw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X, snbep_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EX, nhmex_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EX, nhmex_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X, ivbep_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_X, hswep_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, bdx_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, bdx_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, knl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, skl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, skl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X, skx_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE, skl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE, icl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_NNPI, icl_uncore_init),
	{},
};

MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);

static int __init intel_uncore_init(void)
{
	const struct x86_cpu_id *id;
	struct intel_uncore_init_fun *uncore_init;
	int pret = 0, cret = 0, ret;

	id = x86_match_cpu(intel_uncore_match);
	if (!id)
		return -ENODEV;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	max_dies = topology_max_packages() * topology_max_die_per_package();

	uncore_init = (struct intel_uncore_init_fun *)id->driver_data;
	if (uncore_init->pci_init) {
		pret = uncore_init->pci_init();
		if (!pret)
			pret = uncore_pci_init();
	}

	if (uncore_init->cpu_init) {
		uncore_init->cpu_init();
		cret = uncore_cpu_init();
	}

	if (cret && pret)
		return -ENODEV;

	/* Install hotplug callbacks to setup the targets for each package */
	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
				"perf/x86/intel/uncore:online",
				uncore_event_cpu_online,
				uncore_event_cpu_offline);
	if (ret)
		goto err;
	return 0;

err:
	uncore_types_exit(uncore_msr_uncores);
	uncore_pci_exit();
	return ret;
}
module_init(intel_uncore_init);

static void __exit intel_uncore_exit(void)
{
	cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
	uncore_types_exit(uncore_msr_uncores);
	uncore_pci_exit();
}
module_exit(intel_uncore_exit);