#include "perf_event_intel_uncore.h"

static struct intel_uncore_type *empty_uncore[] = { NULL, };
struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
struct intel_uncore_type **uncore_pci_uncores = empty_uncore;

static bool pcidrv_registered;
struct pci_driver *uncore_pci_driver;
/* pci bus to socket mapping */
int uncore_pcibus_to_physid[256] = { [0 ... 255] = -1, };
struct pci_dev *uncore_extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX];

static DEFINE_RAW_SPINLOCK(uncore_box_lock);
/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;

/* constraint for the fixed counter */
static struct event_constraint uncore_constraint_fixed =
	EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
struct event_constraint uncore_constraint_empty =
	EVENT_CONSTRAINT(0, 0, 0);

ssize_t uncore_event_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	struct uncore_event_desc *event =
		container_of(attr, struct uncore_event_desc, attr);
	return sprintf(buf, "%s", event->config);
}

struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
{
	return container_of(event->pmu, struct intel_uncore_pmu, pmu);
}

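/*
 * Look up the box that serves @cpu for @pmu. The per-cpu pointer is
 * populated lazily: on a miss we take uncore_box_lock and search the
 * pmu's box list for the box belonging to this cpu's physical package.
 */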
struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
	struct intel_uncore_box *box;

	box = *per_cpu_ptr(pmu->box, cpu);
	if (box)
		return box;

	raw_spin_lock(&uncore_box_lock);
	/* Recheck in lock to handle races. */
	if (*per_cpu_ptr(pmu->box, cpu))
		goto out;
	list_for_each_entry(box, &pmu->box_list, list) {
		if (box->phys_id == topology_physical_package_id(cpu)) {
			atomic_inc(&box->refcnt);
			*per_cpu_ptr(pmu->box, cpu) = box;
			break;
		}
	}
out:
	raw_spin_unlock(&uncore_box_lock);

	return *per_cpu_ptr(pmu->box, cpu);
}

struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
{
	/*
	 * perf core schedules events on the basis of cpu; uncore events are
	 * collected by one of the cpus inside a physical package.
	 */
	return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
}

u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 count;

	rdmsrl(event->hw.event_base, count);

	return count;
}

/*
 * generic get constraint function for shared match/mask registers.
 */
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	unsigned long flags;
	bool ok = false;

	/*
	 * reg->alloc can be set due to existing state, so for fake box we
	 * need to ignore this, otherwise we might fail to allocate proper
	 * fake state for this extra reg constraint.
	 */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;

	er = &box->shared_regs[reg1->idx];
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!atomic_read(&er->ref) ||
	    (er->config1 == reg1->config && er->config2 == reg2->config)) {
		atomic_inc(&er->ref);
		er->config1 = reg1->config;
		er->config2 = reg2->config;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (ok) {
		if (!uncore_box_is_fake(box))
			reg1->alloc = 1;
		return NULL;
	}

	return &uncore_constraint_empty;
}

void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

	/*
	 * Only put the constraint if the extra reg was actually allocated.
	 * Also takes care of events which do not use an extra shared reg.
	 *
	 * Also, if this is a fake box we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent box
	 * state either since it will be thrown out.
	 */
	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	er = &box->shared_regs[reg1->idx];
	atomic_dec(&er->ref);
	reg1->alloc = 0;
}

u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	u64 config;

	er = &box->shared_regs[idx];

	raw_spin_lock_irqsave(&er->lock, flags);
	config = er->config;
	raw_spin_unlock_irqrestore(&er->lock, flags);

	return config;
}

static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = idx;
	hwc->last_tag = ++box->tags[idx];

	if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
		hwc->event_base = uncore_fixed_ctr(box);
		hwc->config_base = uncore_fixed_ctl(box);
		return;
	}

	hwc->config_base = uncore_event_ctl(box, hwc->idx);
	hwc->event_base = uncore_perf_ctr(box, hwc->idx);
}

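/*
 * Read the counter and accumulate the delta into event->count. Uncore
 * counters are narrower than 64 bit, so both values are shifted up to
 * the top of a 64-bit word before subtracting, which makes the delta
 * wrap correctly regardless of the actual counter width.
 */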
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 prev_count, new_count, delta;
	int shift;

	if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
		shift = 64 - uncore_fixed_ctr_bits(box);
	else
		shift = 64 - uncore_perf_ctr_bits(box);

	/* the hrtimer might modify the previous event value */
again:
	prev_count = local64_read(&event->hw.prev_count);
	new_count = uncore_read_counter(box, event);
	if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
		goto again;

	delta = (new_count << shift) - (prev_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}

/*
 * The overflow interrupt is unavailable for SandyBridge-EP and is broken
 * for SandyBridge, so we use a hrtimer to periodically poll the counter
 * to avoid overflow.
 */
static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
{
	struct intel_uncore_box *box;
	struct perf_event *event;
	unsigned long flags;
	int bit;

	box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
	if (!box->n_active || box->cpu != smp_processor_id())
		return HRTIMER_NORESTART;
	/*
	 * disable local interrupts to prevent uncore_pmu_event_start/stop
	 * from interrupting the update process
	 */
	local_irq_save(flags);

	/*
	 * handle boxes with an active event list as opposed to active
	 * counters
	 */
	list_for_each_entry(event, &box->active_list, active_entry) {
		uncore_perf_event_update(box, event);
	}

	for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
		uncore_perf_event_update(box, box->events[bit]);

	local_irq_restore(flags);

	hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
	return HRTIMER_RESTART;
}

void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
		      HRTIMER_MODE_REL_PINNED);
}

void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_cancel(&box->hrtimer);
}

static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	box->hrtimer.function = uncore_pmu_hrtimer;
}

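/*
 * Allocate a box together with its trailing array of shared extra
 * registers on the requested NUMA node. The box starts with a
 * refcount of 1 and the default hrtimer polling interval.
 */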
static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int node)
{
	struct intel_uncore_box *box;
	int i, size;

	size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);

	box = kzalloc_node(size, GFP_KERNEL, node);
	if (!box)
		return NULL;

	for (i = 0; i < type->num_shared_regs; i++)
		raw_spin_lock_init(&box->shared_regs[i].lock);

	uncore_pmu_init_hrtimer(box);
	atomic_set(&box->refcnt, 1);
	box->cpu = -1;
	box->phys_id = -1;

	/* set default hrtimer timeout */
	box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;

	INIT_LIST_HEAD(&box->active_list);

	return box;
}

/*
 * Using uncore_pmu_event_init pmu event_init callback
 * as a detection point for uncore events.
 */
static int uncore_pmu_event_init(struct perf_event *event);

static bool is_uncore_event(struct perf_event *event)
{
	return event->pmu->event_init == uncore_pmu_event_init;
}

static int
uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = box->pmu->type->num_counters;
	if (box->pmu->type->fixed_ctl)
		max_count++;

	if (box->n_events >= max_count)
		return -EINVAL;

	n = box->n_events;

	if (is_uncore_event(leader)) {
		box->event_list[n] = leader;
		n++;
	}

	if (!dogrp)
		return n;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (!is_uncore_event(event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -EINVAL;

		box->event_list[n] = event;
		n++;
	}
	return n;
}

static struct event_constraint *
uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct event_constraint *c;

	if (type->ops->get_constraint) {
		c = type->ops->get_constraint(box, event);
		if (c)
			return c;
	}

	if (event->attr.config == UNCORE_FIXED_EVENT)
		return &uncore_constraint_fixed;

	if (type->constraints) {
		for_each_event_constraint(c, type->constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &type->unconstrainted;
}

static void uncore_put_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	if (box->pmu->type->ops->put_constraint)
		box->pmu->type->ops->put_constraint(box, event);
}

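/*
 * Assign counters to the first @n events in box->event_list. The fast
 * path tries to keep every event on the counter it already used; if
 * any constraint is no longer honored, fall back to the generic
 * constraint solver in perf_assign_events().
 */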
static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
{
	unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	struct event_constraint *c;
	int i, wmin, wmax, ret = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);

	for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
		c = uncore_get_event_constraint(box, box->event_list[i]);
		box->event_constraint[i] = c;
		wmin = min(wmin, c->weight);
		wmax = max(wmax, c->weight);
	}

	/* fastpath, try to reuse previous register */
	for (i = 0; i < n; i++) {
		hwc = &box->event_list[i]->hw;
		c = box->event_constraint[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	/* slow path */
	if (i != n)
		ret = perf_assign_events(box->event_constraint, n,
					 wmin, wmax, n, assign);

	if (!assign || ret) {
		for (i = 0; i < n; i++)
			uncore_put_event_constraint(box, box->event_list[i]);
	}
	return ret ? -EINVAL : 0;
}

static void uncore_pmu_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int idx = event->hw.idx;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
		return;

	event->hw.state = 0;
	box->events[idx] = event;
	box->n_active++;
	__set_bit(idx, box->active_mask);

	local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
	uncore_enable_event(box, event);

	if (box->n_active == 1) {
		uncore_enable_box(box);
		uncore_pmu_start_hrtimer(box);
	}
}

static void uncore_pmu_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
		uncore_disable_event(box, event);
		box->n_active--;
		box->events[hwc->idx] = NULL;
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		if (box->n_active == 0) {
			uncore_disable_box(box);
			uncore_pmu_cancel_hrtimer(box);
		}
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

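/*
 * pmu::add callback: collect the new event into the box's event list,
 * reschedule all events, stop those that move to a different counter
 * and (re)start everything that is not explicitly kept stopped.
 */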
static int uncore_pmu_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;
	int assign[UNCORE_PMC_IDX_MAX];
	int i, n, ret;

	if (!box)
		return -ENODEV;

	ret = n = uncore_collect_events(box, event, false);
	if (ret < 0)
		return ret;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	ret = uncore_assign_events(box, assign, n);
	if (ret)
		return ret;

	/* save events moving to new counters */
	for (i = 0; i < box->n_events; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx == assign[i] &&
		    hwc->last_tag == box->tags[assign[i]])
			continue;
		/*
		 * Ensure we don't accidentally enable a stopped
		 * counter simply because we rescheduled.
		 */
		if (hwc->state & PERF_HES_STOPPED)
			hwc->state |= PERF_HES_ARCH;

		uncore_pmu_event_stop(event, PERF_EF_UPDATE);
	}

	/* reprogram moved events into new counters */
	for (i = 0; i < n; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx != assign[i] ||
		    hwc->last_tag != box->tags[assign[i]])
			uncore_assign_hw_event(box, event, assign[i]);
		else if (i < box->n_events)
			continue;

		if (hwc->state & PERF_HES_ARCH)
			continue;

		uncore_pmu_event_start(event, 0);
	}
	box->n_events = n;

	return 0;
}

static void uncore_pmu_event_del(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int i;

	uncore_pmu_event_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < box->n_events; i++) {
		if (event == box->event_list[i]) {
			uncore_put_event_constraint(box, event);

			while (++i < box->n_events)
				box->event_list[i - 1] = box->event_list[i];

			--box->n_events;
			break;
		}
	}

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
}

void uncore_pmu_event_read(struct perf_event *event)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	uncore_perf_event_update(box, event);
}

/*
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int uncore_validate_group(struct intel_uncore_pmu *pmu,
				struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct intel_uncore_box *fake_box;
	int ret = -EINVAL, n;

	fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
	if (!fake_box)
		return -ENOMEM;

	fake_box->pmu = pmu;
	/*
	 * the event is not yet connected with its
	 * siblings therefore we must first collect
	 * existing siblings, then add the new event
	 * before we can simulate the scheduling
	 */
	n = uncore_collect_events(fake_box, leader, true);
	if (n < 0)
		goto out;

	fake_box->n_events = n;
	n = uncore_collect_events(fake_box, event, false);
	if (n < 0)
		goto out;

	fake_box->n_events = n;

	ret = uncore_assign_events(fake_box, NULL, n);
out:
	kfree(fake_box);
	return ret;
}

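/*
 * pmu::event_init callback: sanity check the attributes, bind the
 * event to the cpu that collects uncore events for its package, and,
 * for group members, validate the whole group on a fake box before
 * accepting the event.
 */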
static int uncore_pmu_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	int ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/*
	 * The uncore PMU measures at all privilege levels all the time,
	 * so it doesn't make sense to specify any exclude bits.
	 */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
			event->attr.exclude_hv || event->attr.exclude_idle)
		return -EINVAL;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;
	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;
	event->cpu = box->cpu;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	if (event->attr.config == UNCORE_FIXED_EVENT) {
		/* no fixed counter */
		if (!pmu->type->fixed_ctl)
			return -EINVAL;
		/*
		 * if there is only one fixed counter, only the first pmu
		 * can access the fixed counter
		 */
		if (pmu->type->single_fixed && pmu->pmu_idx > 0)
			return -EINVAL;

		/* fixed counters have event field hardcoded to zero */
		hwc->config = 0ULL;
	} else {
		hwc->config = event->attr.config & pmu->type->event_mask;
		if (pmu->type->ops->hw_config) {
			ret = pmu->type->ops->hw_config(box, event);
			if (ret)
				return ret;
		}
	}

	if (event->group_leader != event)
		ret = uncore_validate_group(pmu, event);
	else
		ret = 0;

	return ret;
}

static ssize_t uncore_get_attr_cpumask(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);

static struct attribute *uncore_pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group uncore_pmu_attr_group = {
	.attrs = uncore_pmu_attrs,
};

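/*
 * Register one struct pmu per box with the perf core. Single-box types
 * show up as "uncore_<type>" (or plain "uncore"), multi-box types as
 * "uncore_<type>_<idx>". From userspace such a PMU can then be used
 * roughly along the lines of
 *	perf stat -a -e uncore_<type>/event=.../ -- sleep 1
 * (illustrative only; the exact event syntax depends on the type's
 * event descriptions exported under the "events" attribute group).
 */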
static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
{
	int ret;

	if (!pmu->type->pmu) {
		pmu->pmu = (struct pmu) {
			.attr_groups	= pmu->type->attr_groups,
			.task_ctx_nr	= perf_invalid_context,
			.event_init	= uncore_pmu_event_init,
			.add		= uncore_pmu_event_add,
			.del		= uncore_pmu_event_del,
			.start		= uncore_pmu_event_start,
			.stop		= uncore_pmu_event_stop,
			.read		= uncore_pmu_event_read,
		};
	} else {
		pmu->pmu = *pmu->type->pmu;
		pmu->pmu.attr_groups = pmu->type->attr_groups;
	}

	if (pmu->type->num_boxes == 1) {
		if (strlen(pmu->type->name) > 0)
			sprintf(pmu->name, "uncore_%s", pmu->type->name);
		else
			sprintf(pmu->name, "uncore");
	} else {
		sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
			pmu->pmu_idx);
	}

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	return ret;
}

static void __init uncore_type_exit(struct intel_uncore_type *type)
{
	int i;

	for (i = 0; i < type->num_boxes; i++)
		free_percpu(type->pmus[i].box);
	kfree(type->pmus);
	type->pmus = NULL;
	kfree(type->events_group);
	type->events_group = NULL;
}

static void __init uncore_types_exit(struct intel_uncore_type **types)
{
	int i;

	for (i = 0; types[i]; i++)
		uncore_type_exit(types[i]);
}

static int __init uncore_type_init(struct intel_uncore_type *type)
{
	struct intel_uncore_pmu *pmus;
	struct attribute_group *attr_group;
	struct attribute **attrs;
	int i, j;

	pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
	if (!pmus)
		return -ENOMEM;

	type->pmus = pmus;

	type->unconstrainted = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
				0, type->num_counters, 0, 0);

	for (i = 0; i < type->num_boxes; i++) {
		pmus[i].func_id = -1;
		pmus[i].pmu_idx = i;
		pmus[i].type = type;
		INIT_LIST_HEAD(&pmus[i].box_list);
		pmus[i].box = alloc_percpu(struct intel_uncore_box *);
		if (!pmus[i].box)
			goto fail;
	}

	if (type->event_descs) {
		i = 0;
		while (type->event_descs[i].attr.attr.name)
			i++;

		attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
					sizeof(*attr_group), GFP_KERNEL);
		if (!attr_group)
			goto fail;

		attrs = (struct attribute **)(attr_group + 1);
		attr_group->name = "events";
		attr_group->attrs = attrs;

		for (j = 0; j < i; j++)
			attrs[j] = &type->event_descs[j].attr.attr;

		type->events_group = attr_group;
	}

	type->pmu_group = &uncore_pmu_attr_group;
	return 0;

fail:
	uncore_type_exit(type);
	return -ENOMEM;
}

static int __init uncore_types_init(struct intel_uncore_type **types)
{
	int i, ret;

	for (i = 0; types[i]; i++) {
		ret = uncore_type_init(types[i]);
		if (ret)
			goto fail;
	}
	return 0;

fail:
	while (--i >= 0)
		uncore_type_exit(types[i]);
	return ret;
}

/*
 * add a pci uncore device
 */
static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct intel_uncore_type *type;
	int phys_id;
	bool first_box = false;

	phys_id = uncore_pcibus_to_physid[pdev->bus->number];
	if (phys_id < 0)
		return -ENODEV;

	if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
		int idx = UNCORE_PCI_DEV_IDX(id->driver_data);
		uncore_extra_pci_dev[phys_id][idx] = pdev;
		pci_set_drvdata(pdev, NULL);
		return 0;
	}

	type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
	box = uncore_alloc_box(type, NUMA_NO_NODE);
	if (!box)
		return -ENOMEM;

	/*
	 * for a performance monitoring unit with multiple boxes,
	 * each box has a different function id.
	 */
	pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
	if (pmu->func_id < 0)
		pmu->func_id = pdev->devfn;
	else
		WARN_ON_ONCE(pmu->func_id != pdev->devfn);

	box->phys_id = phys_id;
	box->pci_dev = pdev;
	box->pmu = pmu;
	uncore_box_init(box);
	pci_set_drvdata(pdev, box);

	raw_spin_lock(&uncore_box_lock);
	if (list_empty(&pmu->box_list))
		first_box = true;
	list_add_tail(&box->list, &pmu->box_list);
	raw_spin_unlock(&uncore_box_lock);

	if (first_box)
		uncore_pmu_register(pmu);
	return 0;
}

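/*
 * remove a pci uncore device: drop it from the extra-dev table or from
 * its pmu's box list, clear any per-cpu references, and unregister the
 * pmu when its last box goes away.
 */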
static void uncore_pci_remove(struct pci_dev *pdev)
{
	struct intel_uncore_box *box = pci_get_drvdata(pdev);
	struct intel_uncore_pmu *pmu;
	int i, cpu, phys_id = uncore_pcibus_to_physid[pdev->bus->number];
	bool last_box = false;

	if (!box) {
		for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
			if (uncore_extra_pci_dev[phys_id][i] == pdev) {
				uncore_extra_pci_dev[phys_id][i] = NULL;
				break;
			}
		}
		WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
		return;
	}

	pmu = box->pmu;
	if (WARN_ON_ONCE(phys_id != box->phys_id))
		return;

	pci_set_drvdata(pdev, NULL);

	raw_spin_lock(&uncore_box_lock);
	list_del(&box->list);
	if (list_empty(&pmu->box_list))
		last_box = true;
	raw_spin_unlock(&uncore_box_lock);

	for_each_possible_cpu(cpu) {
		if (*per_cpu_ptr(pmu->box, cpu) == box) {
			*per_cpu_ptr(pmu->box, cpu) = NULL;
			atomic_dec(&box->refcnt);
		}
	}

	WARN_ON_ONCE(atomic_read(&box->refcnt) != 1);
	kfree(box);

	if (last_box)
		perf_pmu_unregister(&pmu->pmu);
}

static int __init uncore_pci_init(void)
{
	int ret;

	switch (boot_cpu_data.x86_model) {
	case 45: /* Sandy Bridge-EP */
		ret = snbep_uncore_pci_init();
		break;
	case 62: /* Ivy Bridge-EP */
		ret = ivbep_uncore_pci_init();
		break;
	case 63: /* Haswell-EP */
		ret = hswep_uncore_pci_init();
		break;
	case 86: /* BDX-DE */
		ret = bdx_uncore_pci_init();
		break;
	case 42: /* Sandy Bridge */
		ret = snb_uncore_pci_init();
		break;
	case 58: /* Ivy Bridge */
		ret = ivb_uncore_pci_init();
		break;
	case 60: /* Haswell */
	case 69: /* Haswell Celeron */
		ret = hsw_uncore_pci_init();
		break;
	case 61: /* Broadwell */
		ret = bdw_uncore_pci_init();
		break;
	default:
		return 0;
	}

	if (ret)
		return ret;

	ret = uncore_types_init(uncore_pci_uncores);
	if (ret)
		return ret;

	uncore_pci_driver->probe = uncore_pci_probe;
	uncore_pci_driver->remove = uncore_pci_remove;

	ret = pci_register_driver(uncore_pci_driver);
	if (ret == 0)
		pcidrv_registered = true;
	else
		uncore_types_exit(uncore_pci_uncores);

	return ret;
}

static void __init uncore_pci_exit(void)
{
	if (pcidrv_registered) {
		pcidrv_registered = false;
		pci_unregister_driver(uncore_pci_driver);
		uncore_types_exit(uncore_pci_uncores);
	}
}

/* CPU hot plug/unplug are serialized by cpu_add_remove_lock mutex */
static LIST_HEAD(boxes_to_free);

static void uncore_kfree_boxes(void)
{
	struct intel_uncore_box *box;

	while (!list_empty(&boxes_to_free)) {
		box = list_entry(boxes_to_free.next,
				 struct intel_uncore_box, list);
		list_del(&box->list);
		kfree(box);
	}
}

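/*
 * A dying cpu drops its reference on every MSR box; boxes whose
 * refcount hits zero are queued on boxes_to_free and released later
 * from uncore_kfree_boxes(), once the hotplug operation has finished.
 */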
static void uncore_cpu_dying(int cpu)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, j;

	for (i = 0; uncore_msr_uncores[i]; i++) {
		type = uncore_msr_uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			box = *per_cpu_ptr(pmu->box, cpu);
			*per_cpu_ptr(pmu->box, cpu) = NULL;
			if (box && atomic_dec_and_test(&box->refcnt))
				list_add(&box->list, &boxes_to_free);
		}
	}
}

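/*
 * CPU_STARTING: either adopt an already initialized box from another
 * online cpu in the same package, or initialize the box that was
 * allocated for this cpu in uncore_cpu_prepare().
 */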
static int uncore_cpu_starting(int cpu)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box, *exist;
	int i, j, k, phys_id;

	phys_id = topology_physical_package_id(cpu);

	for (i = 0; uncore_msr_uncores[i]; i++) {
		type = uncore_msr_uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			box = *per_cpu_ptr(pmu->box, cpu);
			/* called by uncore_cpu_init? */
			if (box && box->phys_id >= 0) {
				uncore_box_init(box);
				continue;
			}

			for_each_online_cpu(k) {
				exist = *per_cpu_ptr(pmu->box, k);
				if (exist && exist->phys_id == phys_id) {
					atomic_inc(&exist->refcnt);
					*per_cpu_ptr(pmu->box, cpu) = exist;
					if (box) {
						list_add(&box->list,
							 &boxes_to_free);
						box = NULL;
					}
					break;
				}
			}

			if (box) {
				box->phys_id = phys_id;
				uncore_box_init(box);
			}
		}
	}
	return 0;
}

static int uncore_cpu_prepare(int cpu, int phys_id)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, j;

	for (i = 0; uncore_msr_uncores[i]; i++) {
		type = uncore_msr_uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			if (pmu->func_id < 0)
				pmu->func_id = j;

			box = uncore_alloc_box(type, cpu_to_node(cpu));
			if (!box)
				return -ENOMEM;

			box->pmu = pmu;
			box->phys_id = phys_id;
			*per_cpu_ptr(pmu->box, cpu) = box;
		}
	}
	return 0;
}

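/*
 * Move the uncore PMU context of every box from @old_cpu to @new_cpu,
 * migrating any active perf events along with it. old_cpu == -1 means
 * the box had no collecting cpu yet; new_cpu == -1 parks the box.
 */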
static void
uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, j;

	for (i = 0; uncores[i]; i++) {
		type = uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			if (old_cpu < 0)
				box = uncore_pmu_to_box(pmu, new_cpu);
			else
				box = uncore_pmu_to_box(pmu, old_cpu);
			if (!box)
				continue;

			if (old_cpu < 0) {
				WARN_ON_ONCE(box->cpu != -1);
				box->cpu = new_cpu;
				continue;
			}

			WARN_ON_ONCE(box->cpu != old_cpu);
			if (new_cpu >= 0) {
				uncore_pmu_cancel_hrtimer(box);
				perf_pmu_migrate_context(&pmu->pmu,
						old_cpu, new_cpu);
				box->cpu = new_cpu;
			} else {
				box->cpu = -1;
			}
		}
	}
}

static void uncore_event_exit_cpu(int cpu)
{
	int i, phys_id, target;

	/* if exiting cpu is used for collecting uncore events */
	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
		return;

	/* find a new cpu to collect uncore events */
	phys_id = topology_physical_package_id(cpu);
	target = -1;
	for_each_online_cpu(i) {
		if (i == cpu)
			continue;
		if (phys_id == topology_physical_package_id(i)) {
			target = i;
			break;
		}
	}

	/* migrate uncore events to the new cpu */
	if (target >= 0)
		cpumask_set_cpu(target, &uncore_cpu_mask);

	uncore_change_context(uncore_msr_uncores, cpu, target);
	uncore_change_context(uncore_pci_uncores, cpu, target);
}

static void uncore_event_init_cpu(int cpu)
{
	int i, phys_id;

	phys_id = topology_physical_package_id(cpu);
	for_each_cpu(i, &uncore_cpu_mask) {
		if (phys_id == topology_physical_package_id(i))
			return;
	}

	cpumask_set_cpu(cpu, &uncore_cpu_mask);

	uncore_change_context(uncore_msr_uncores, -1, cpu);
	uncore_change_context(uncore_pci_uncores, -1, cpu);
}

static int uncore_cpu_notifier(struct notifier_block *self,
			       unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	/* allocate/free data structure for uncore box */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		uncore_cpu_prepare(cpu, -1);
		break;
	case CPU_STARTING:
		uncore_cpu_starting(cpu);
		break;
	case CPU_UP_CANCELED:
	case CPU_DYING:
		uncore_cpu_dying(cpu);
		break;
	case CPU_ONLINE:
	case CPU_DEAD:
		uncore_kfree_boxes();
		break;
	default:
		break;
	}

	/* select the cpu that collects uncore events */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_FAILED:
	case CPU_STARTING:
		uncore_event_init_cpu(cpu);
		break;
	case CPU_DOWN_PREPARE:
		uncore_event_exit_cpu(cpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block uncore_cpu_nb = {
	.notifier_call	= uncore_cpu_notifier,
	/*
	 * to migrate uncore events, our notifier should be executed
	 * before perf core's notifier.
	 */
	.priority	= CPU_PRI_PERF + 1,
};

static void __init uncore_cpu_setup(void *dummy)
{
	uncore_cpu_starting(smp_processor_id());
}

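/*
 * Select and initialize the MSR-based uncore support for the running
 * cpu model. Unknown models simply get no MSR uncore PMUs.
 */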
static int __init uncore_cpu_init(void)
{
	int ret;

	switch (boot_cpu_data.x86_model) {
	case 26: /* Nehalem */
	case 30:
	case 37: /* Westmere */
	case 44:
		nhm_uncore_cpu_init();
		break;
	case 42: /* Sandy Bridge */
	case 58: /* Ivy Bridge */
	case 60: /* Haswell */
	case 69: /* Haswell */
	case 70: /* Haswell */
	case 61: /* Broadwell */
	case 71: /* Broadwell */
		snb_uncore_cpu_init();
		break;
	case 45: /* Sandy Bridge-EP */
		snbep_uncore_cpu_init();
		break;
	case 46: /* Nehalem-EX */
	case 47: /* Westmere-EX aka. Xeon E7 */
		nhmex_uncore_cpu_init();
		break;
	case 62: /* Ivy Bridge-EP */
		ivbep_uncore_cpu_init();
		break;
	case 63: /* Haswell-EP */
		hswep_uncore_cpu_init();
		break;
	case 86: /* BDX-DE */
		bdx_uncore_cpu_init();
		break;
	default:
		return 0;
	}

	ret = uncore_types_init(uncore_msr_uncores);
	if (ret)
		return ret;

	return 0;
}

static int __init uncore_pmus_register(void)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_type *type;
	int i, j;

	for (i = 0; uncore_msr_uncores[i]; i++) {
		type = uncore_msr_uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			uncore_pmu_register(pmu);
		}
	}

	return 0;
}

static void __init uncore_cpumask_init(void)
{
	int cpu;

	/*
	 * only invoke once, from either the msr or pci init code
	 */
	if (!cpumask_empty(&uncore_cpu_mask))
		return;

	cpu_notifier_register_begin();

	for_each_online_cpu(cpu) {
		int i, phys_id = topology_physical_package_id(cpu);

		for_each_cpu(i, &uncore_cpu_mask) {
			if (phys_id == topology_physical_package_id(i)) {
				phys_id = -1;
				break;
			}
		}
		if (phys_id < 0)
			continue;

		uncore_cpu_prepare(cpu, phys_id);
		uncore_event_init_cpu(cpu);
	}
	on_each_cpu(uncore_cpu_setup, NULL, 1);

	__register_cpu_notifier(&uncore_cpu_nb);

	cpu_notifier_register_done();
}

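/*
 * Module entry point: set up the PCI and MSR uncore PMUs, pick the
 * collecting cpus and register the hotplug notifier. Bail out early
 * on non-Intel CPUs and when running under a hypervisor, where the
 * uncore MSRs and devices are generally not available to the guest.
 */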
static int __init intel_uncore_init(void)
{
	int ret;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return -ENODEV;

	if (cpu_has_hypervisor)
		return -ENODEV;

	ret = uncore_pci_init();
	if (ret)
		goto fail;
	ret = uncore_cpu_init();
	if (ret) {
		uncore_pci_exit();
		goto fail;
	}
	uncore_cpumask_init();

	uncore_pmus_register();
	return 0;

fail:
	return ret;
}
device_initcall(intel_uncore_init);