arch/x86/events/intel/uncore.c
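/*
 * Generic support for Intel uncore performance monitoring: box and PMU
 * management, event scheduling, hrtimer based counter polling and CPU
 * hotplug handling. The model specific MSR and PCI uncore types are
 * declared in uncore.h and set up by the per-model init functions
 * referenced from intel_uncore_match[] below.
 */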
1 #include <linux/module.h>
2
3 #include <asm/cpu_device_id.h>
4 #include <asm/intel-family.h>
5 #include "uncore.h"
6
7 static struct intel_uncore_type *empty_uncore[] = { NULL, };
8 struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
9 struct intel_uncore_type **uncore_pci_uncores = empty_uncore;
10
11 static bool pcidrv_registered;
12 struct pci_driver *uncore_pci_driver;
13 /* pci bus to socket mapping */
14 DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
15 struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
16 struct pci_extra_dev *uncore_extra_pci_dev;
17 static int max_packages;
18
19 /* mask of cpus that collect uncore events */
20 static cpumask_t uncore_cpu_mask;
21
22 /* constraint for the fixed counter */
23 static struct event_constraint uncore_constraint_fixed =
24         EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
25 struct event_constraint uncore_constraint_empty =
26         EVENT_CONSTRAINT(0, 0, 0);
27
28 MODULE_LICENSE("GPL");
29
30 static int uncore_pcibus_to_physid(struct pci_bus *bus)
31 {
32         struct pci2phy_map *map;
33         int phys_id = -1;
34
35         raw_spin_lock(&pci2phy_map_lock);
36         list_for_each_entry(map, &pci2phy_map_head, list) {
37                 if (map->segment == pci_domain_nr(bus)) {
38                         phys_id = map->pbus_to_physid[bus->number];
39                         break;
40                 }
41         }
42         raw_spin_unlock(&pci2phy_map_lock);
43
44         return phys_id;
45 }
46
47 static void uncore_free_pcibus_map(void)
48 {
49         struct pci2phy_map *map, *tmp;
50
51         list_for_each_entry_safe(map, tmp, &pci2phy_map_head, list) {
52                 list_del(&map->list);
53                 kfree(map);
54         }
55 }
56
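/*
 * Look up (and, if necessary, allocate) the bus-to-physical-id map for a
 * PCI segment. The caller holds pci2phy_map_lock; the lock is dropped
 * around the GFP_KERNEL allocation, which may sleep, and the lookup is
 * retried afterwards in case another CPU inserted the entry meanwhile.
 */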
57 struct pci2phy_map *__find_pci2phy_map(int segment)
58 {
59         struct pci2phy_map *map, *alloc = NULL;
60         int i;
61
62         lockdep_assert_held(&pci2phy_map_lock);
63
64 lookup:
65         list_for_each_entry(map, &pci2phy_map_head, list) {
66                 if (map->segment == segment)
67                         goto end;
68         }
69
70         if (!alloc) {
71                 raw_spin_unlock(&pci2phy_map_lock);
72                 alloc = kmalloc(sizeof(struct pci2phy_map), GFP_KERNEL);
73                 raw_spin_lock(&pci2phy_map_lock);
74
75                 if (!alloc)
76                         return NULL;
77
78                 goto lookup;
79         }
80
81         map = alloc;
82         alloc = NULL;
83         map->segment = segment;
84         for (i = 0; i < 256; i++)
85                 map->pbus_to_physid[i] = -1;
86         list_add_tail(&map->list, &pci2phy_map_head);
87
88 end:
89         kfree(alloc);
90         return map;
91 }
92
93 ssize_t uncore_event_show(struct kobject *kobj,
94                           struct kobj_attribute *attr, char *buf)
95 {
96         struct uncore_event_desc *event =
97                 container_of(attr, struct uncore_event_desc, attr);
98         return sprintf(buf, "%s", event->config);
99 }
100
101 struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
102 {
103         unsigned int pkgid = topology_logical_package_id(cpu);
104
105         /*
106          * The unsigned check also catches the '-1' return value for
107          * non-existent mappings in the topology map.
108          */
109         return pkgid < max_packages ? pmu->boxes[pkgid] : NULL;
110 }
111
112 u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
113 {
114         u64 count;
115
116         rdmsrl(event->hw.event_base, count);
117
118         return count;
119 }
120
121 /*
122  * generic get constraint function for shared match/mask registers.
123  */
124 struct event_constraint *
125 uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
126 {
127         struct intel_uncore_extra_reg *er;
128         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
129         struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
130         unsigned long flags;
131         bool ok = false;
132
133         /*
134          * reg->alloc can be set due to existing state, so for a fake box
135          * we need to ignore it; otherwise we might fail to allocate proper
136          * fake state for this extra reg constraint.
137          */
138         if (reg1->idx == EXTRA_REG_NONE ||
139             (!uncore_box_is_fake(box) && reg1->alloc))
140                 return NULL;
141
142         er = &box->shared_regs[reg1->idx];
143         raw_spin_lock_irqsave(&er->lock, flags);
144         if (!atomic_read(&er->ref) ||
145             (er->config1 == reg1->config && er->config2 == reg2->config)) {
146                 atomic_inc(&er->ref);
147                 er->config1 = reg1->config;
148                 er->config2 = reg2->config;
149                 ok = true;
150         }
151         raw_spin_unlock_irqrestore(&er->lock, flags);
152
153         if (ok) {
154                 if (!uncore_box_is_fake(box))
155                         reg1->alloc = 1;
156                 return NULL;
157         }
158
159         return &uncore_constraint_empty;
160 }
161
162 void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
163 {
164         struct intel_uncore_extra_reg *er;
165         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
166
167         /*
168          * Only put the constraint if the extra reg was actually allocated.
169          * This also takes care of events which do not use an extra shared reg.
170          *
171          * Also, if this is a fake box we shouldn't touch any event state
172          * (reg->alloc) and we don't care about leaving inconsistent box
173          * state either since it will be thrown out.
174          */
175         if (uncore_box_is_fake(box) || !reg1->alloc)
176                 return;
177
178         er = &box->shared_regs[reg1->idx];
179         atomic_dec(&er->ref);
180         reg1->alloc = 0;
181 }
182
183 u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
184 {
185         struct intel_uncore_extra_reg *er;
186         unsigned long flags;
187         u64 config;
188
189         er = &box->shared_regs[idx];
190
191         raw_spin_lock_irqsave(&er->lock, flags);
192         config = er->config;
193         raw_spin_unlock_irqrestore(&er->lock, flags);
194
195         return config;
196 }
197
198 static void uncore_assign_hw_event(struct intel_uncore_box *box,
199                                    struct perf_event *event, int idx)
200 {
201         struct hw_perf_event *hwc = &event->hw;
202
203         hwc->idx = idx;
204         hwc->last_tag = ++box->tags[idx];
205
206         if (uncore_pmc_fixed(hwc->idx)) {
207                 hwc->event_base = uncore_fixed_ctr(box);
208                 hwc->config_base = uncore_fixed_ctl(box);
209                 return;
210         }
211
212         hwc->config_base = uncore_event_ctl(box, hwc->idx);
213         hwc->event_base  = uncore_perf_ctr(box, hwc->idx);
214 }
215
216 void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
217 {
218         u64 prev_count, new_count, delta;
219         int shift;
220
221         if (uncore_pmc_freerunning(event->hw.idx))
222                 shift = 64 - uncore_freerunning_bits(box, event);
223         else if (uncore_pmc_fixed(event->hw.idx))
224                 shift = 64 - uncore_fixed_ctr_bits(box);
225         else
226                 shift = 64 - uncore_perf_ctr_bits(box);
227
228         /* the hrtimer might modify the previous event value */
229 again:
230         prev_count = local64_read(&event->hw.prev_count);
231         new_count = uncore_read_counter(box, event);
232         if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
233                 goto again;
234
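        /*
         * Shift both values up to bit 63 and back down so that the
         * subtraction is performed in the counter's native width; this
         * makes counter wrap-around come out as a small positive delta.
         */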
235         delta = (new_count << shift) - (prev_count << shift);
236         delta >>= shift;
237
238         local64_add(delta, &event->count);
239 }
240
241 /*
242  * The overflow interrupt is unavailable on SandyBridge-EP and broken on
243  * SandyBridge, so we use a hrtimer to periodically poll the counters
244  * and avoid overflow.
245  */
246 static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
247 {
248         struct intel_uncore_box *box;
249         struct perf_event *event;
250         unsigned long flags;
251         int bit;
252
253         box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
254         if (!box->n_active || box->cpu != smp_processor_id())
255                 return HRTIMER_NORESTART;
256         /*
257          * Disable local interrupts to prevent uncore_pmu_event_start/stop
258          * from interrupting the update process.
259          */
260         local_irq_save(flags);
261
262         /*
263          * Handle events on the box's active event list as opposed to
264          * events occupying active counters.
265          */
266         list_for_each_entry(event, &box->active_list, active_entry) {
267                 uncore_perf_event_update(box, event);
268         }
269
270         for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
271                 uncore_perf_event_update(box, box->events[bit]);
272
273         local_irq_restore(flags);
274
275         hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
276         return HRTIMER_RESTART;
277 }
278
279 void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
280 {
281         hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
282                       HRTIMER_MODE_REL_PINNED);
283 }
284
285 void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
286 {
287         hrtimer_cancel(&box->hrtimer);
288 }
289
290 static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
291 {
292         hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
293         box->hrtimer.function = uncore_pmu_hrtimer;
294 }
295
296 static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
297                                                  int node)
298 {
299         int i, size, numshared = type->num_shared_regs;
300         struct intel_uncore_box *box;
301
302         size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg);
303
304         box = kzalloc_node(size, GFP_KERNEL, node);
305         if (!box)
306                 return NULL;
307
308         for (i = 0; i < numshared; i++)
309                 raw_spin_lock_init(&box->shared_regs[i].lock);
310
311         uncore_pmu_init_hrtimer(box);
312         box->cpu = -1;
313         box->pci_phys_id = -1;
314         box->pkgid = -1;
315
316         /* set default hrtimer timeout */
317         box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;
318
319         INIT_LIST_HEAD(&box->active_list);
320
321         return box;
322 }
323
324 /*
325  * The uncore_pmu_event_init() pmu event_init callback is used
326  * as a detection point for uncore events.
327  */
328 static int uncore_pmu_event_init(struct perf_event *event);
329
330 static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event)
331 {
332         return &box->pmu->pmu == event->pmu;
333 }
334
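/*
 * Collect the events to be scheduled on this box: the group leader (if it
 * belongs to this box's PMU) and, when dogrp is set, all of its active
 * siblings. Returns the new number of collected events, or -EINVAL if the
 * box runs out of counters (including the fixed counter, when present).
 */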
335 static int
336 uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
337                       bool dogrp)
338 {
339         struct perf_event *event;
340         int n, max_count;
341
342         max_count = box->pmu->type->num_counters;
343         if (box->pmu->type->fixed_ctl)
344                 max_count++;
345
346         if (box->n_events >= max_count)
347                 return -EINVAL;
348
349         n = box->n_events;
350
351         if (is_box_event(box, leader)) {
352                 box->event_list[n] = leader;
353                 n++;
354         }
355
356         if (!dogrp)
357                 return n;
358
359         for_each_sibling_event(event, leader) {
360                 if (!is_box_event(box, event) ||
361                     event->state <= PERF_EVENT_STATE_OFF)
362                         continue;
363
364                 if (n >= max_count)
365                         return -EINVAL;
366
367                 box->event_list[n] = event;
368                 n++;
369         }
370         return n;
371 }
372
373 static struct event_constraint *
374 uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
375 {
376         struct intel_uncore_type *type = box->pmu->type;
377         struct event_constraint *c;
378
379         if (type->ops->get_constraint) {
380                 c = type->ops->get_constraint(box, event);
381                 if (c)
382                         return c;
383         }
384
385         if (event->attr.config == UNCORE_FIXED_EVENT)
386                 return &uncore_constraint_fixed;
387
388         if (type->constraints) {
389                 for_each_event_constraint(c, type->constraints) {
390                         if ((event->hw.config & c->cmask) == c->code)
391                                 return c;
392                 }
393         }
394
395         return &type->unconstrainted;
396 }
397
398 static void uncore_put_event_constraint(struct intel_uncore_box *box,
399                                         struct perf_event *event)
400 {
401         if (box->pmu->type->ops->put_constraint)
402                 box->pmu->type->ops->put_constraint(box, event);
403 }
404
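/*
 * Assign counters to the collected events: gather the per-event
 * constraints, try the fast path of keeping each event on its previously
 * assigned counter, and otherwise fall back to the generic
 * perf_assign_events() solver using the constraint weight bounds
 * (wmin/wmax) computed in the first loop.
 */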
405 static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
406 {
407         unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
408         struct event_constraint *c;
409         int i, wmin, wmax, ret = 0;
410         struct hw_perf_event *hwc;
411
412         bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);
413
414         for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
415                 c = uncore_get_event_constraint(box, box->event_list[i]);
416                 box->event_constraint[i] = c;
417                 wmin = min(wmin, c->weight);
418                 wmax = max(wmax, c->weight);
419         }
420
421         /* fastpath, try to reuse previous register */
422         for (i = 0; i < n; i++) {
423                 hwc = &box->event_list[i]->hw;
424                 c = box->event_constraint[i];
425
426                 /* never assigned */
427                 if (hwc->idx == -1)
428                         break;
429
430                 /* constraint still honored */
431                 if (!test_bit(hwc->idx, c->idxmsk))
432                         break;
433
434                 /* not already used */
435                 if (test_bit(hwc->idx, used_mask))
436                         break;
437
438                 __set_bit(hwc->idx, used_mask);
439                 if (assign)
440                         assign[i] = hwc->idx;
441         }
442         /* slow path */
443         if (i != n)
444                 ret = perf_assign_events(box->event_constraint, n,
445                                          wmin, wmax, n, assign);
446
447         if (!assign || ret) {
448                 for (i = 0; i < n; i++)
449                         uncore_put_event_constraint(box, box->event_list[i]);
450         }
451         return ret ? -EINVAL : 0;
452 }
453
454 void uncore_pmu_event_start(struct perf_event *event, int flags)
455 {
456         struct intel_uncore_box *box = uncore_event_to_box(event);
457         int idx = event->hw.idx;
458
459         if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
460                 return;
461
462         /*
463          * Free running counter is read-only and always active.
464          * Use the current counter value as start point.
465          * There is no overflow interrupt for free running counter.
466          * Use hrtimer to periodically poll the counter to avoid overflow.
467          */
468         if (uncore_pmc_freerunning(event->hw.idx)) {
469                 list_add_tail(&event->active_entry, &box->active_list);
470                 local64_set(&event->hw.prev_count,
471                             uncore_read_counter(box, event));
472                 if (box->n_active++ == 0)
473                         uncore_pmu_start_hrtimer(box);
474                 return;
475         }
476
477         if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
478                 return;
479
480         event->hw.state = 0;
481         box->events[idx] = event;
482         box->n_active++;
483         __set_bit(idx, box->active_mask);
484
485         local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
486         uncore_enable_event(box, event);
487
488         if (box->n_active == 1) {
489                 uncore_enable_box(box);
490                 uncore_pmu_start_hrtimer(box);
491         }
492 }
493
494 void uncore_pmu_event_stop(struct perf_event *event, int flags)
495 {
496         struct intel_uncore_box *box = uncore_event_to_box(event);
497         struct hw_perf_event *hwc = &event->hw;
498
499         /* Cannot disable a free running counter, which is read-only */
500         if (uncore_pmc_freerunning(hwc->idx)) {
501                 list_del(&event->active_entry);
502                 if (--box->n_active == 0)
503                         uncore_pmu_cancel_hrtimer(box);
504                 uncore_perf_event_update(box, event);
505                 return;
506         }
507
508         if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
509                 uncore_disable_event(box, event);
510                 box->n_active--;
511                 box->events[hwc->idx] = NULL;
512                 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
513                 hwc->state |= PERF_HES_STOPPED;
514
515                 if (box->n_active == 0) {
516                         uncore_disable_box(box);
517                         uncore_pmu_cancel_hrtimer(box);
518                 }
519         }
520
521         if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
522                 /*
523                  * Drain the remaining delta count out of an event
524                  * that we are disabling:
525                  */
526                 uncore_perf_event_update(box, event);
527                 hwc->state |= PERF_HES_UPTODATE;
528         }
529 }
530
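/*
 * Add an event to the box: collect it together with the already scheduled
 * events, run the counter assignment, stop any events that have to move to
 * a different counter, and (re)program the events on their new counters.
 */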
531 int uncore_pmu_event_add(struct perf_event *event, int flags)
532 {
533         struct intel_uncore_box *box = uncore_event_to_box(event);
534         struct hw_perf_event *hwc = &event->hw;
535         int assign[UNCORE_PMC_IDX_MAX];
536         int i, n, ret;
537
538         if (!box)
539                 return -ENODEV;
540
541         /*
542          * The free running counter is assigned in event_init().
543          * The free running counter event and the free running counter
544          * are 1:1 mapped, so it doesn't need to be tracked in event_list.
545          */
546         if (uncore_pmc_freerunning(hwc->idx)) {
547                 if (flags & PERF_EF_START)
548                         uncore_pmu_event_start(event, 0);
549                 return 0;
550         }
551
552         ret = n = uncore_collect_events(box, event, false);
553         if (ret < 0)
554                 return ret;
555
556         hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
557         if (!(flags & PERF_EF_START))
558                 hwc->state |= PERF_HES_ARCH;
559
560         ret = uncore_assign_events(box, assign, n);
561         if (ret)
562                 return ret;
563
564         /* save events moving to new counters */
565         for (i = 0; i < box->n_events; i++) {
566                 event = box->event_list[i];
567                 hwc = &event->hw;
568
569                 if (hwc->idx == assign[i] &&
570                         hwc->last_tag == box->tags[assign[i]])
571                         continue;
572                 /*
573                  * Ensure we don't accidentally enable a stopped
574                  * counter simply because we rescheduled.
575                  */
576                 if (hwc->state & PERF_HES_STOPPED)
577                         hwc->state |= PERF_HES_ARCH;
578
579                 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
580         }
581
582         /* reprogram moved events into new counters */
583         for (i = 0; i < n; i++) {
584                 event = box->event_list[i];
585                 hwc = &event->hw;
586
587                 if (hwc->idx != assign[i] ||
588                         hwc->last_tag != box->tags[assign[i]])
589                         uncore_assign_hw_event(box, event, assign[i]);
590                 else if (i < box->n_events)
591                         continue;
592
593                 if (hwc->state & PERF_HES_ARCH)
594                         continue;
595
596                 uncore_pmu_event_start(event, 0);
597         }
598         box->n_events = n;
599
600         return 0;
601 }
602
603 void uncore_pmu_event_del(struct perf_event *event, int flags)
604 {
605         struct intel_uncore_box *box = uncore_event_to_box(event);
606         int i;
607
608         uncore_pmu_event_stop(event, PERF_EF_UPDATE);
609
610         /*
611          * The event for a free running counter is not tracked by event_list.
612          * There is no need to force event->hw.idx = -1 to reassign the
613          * counter, because the event and the free running counter are 1:1 mapped.
614          */
615         if (uncore_pmc_freerunning(event->hw.idx))
616                 return;
617
618         for (i = 0; i < box->n_events; i++) {
619                 if (event == box->event_list[i]) {
620                         uncore_put_event_constraint(box, event);
621
622                         for (++i; i < box->n_events; i++)
623                                 box->event_list[i - 1] = box->event_list[i];
624
625                         --box->n_events;
626                         break;
627                 }
628         }
629
630         event->hw.idx = -1;
631         event->hw.last_tag = ~0ULL;
632 }
633
634 void uncore_pmu_event_read(struct perf_event *event)
635 {
636         struct intel_uncore_box *box = uncore_event_to_box(event);
637         uncore_perf_event_update(box, event);
638 }
639
640 /*
641  * validation ensures the group can be loaded onto the
642  * PMU if it was the only group available.
643  */
644 static int uncore_validate_group(struct intel_uncore_pmu *pmu,
645                                 struct perf_event *event)
646 {
647         struct perf_event *leader = event->group_leader;
648         struct intel_uncore_box *fake_box;
649         int ret = -EINVAL, n;
650
651         /* The free running counter is always active. */
652         if (uncore_pmc_freerunning(event->hw.idx))
653                 return 0;
654
655         fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
656         if (!fake_box)
657                 return -ENOMEM;
658
659         fake_box->pmu = pmu;
660         /*
661          * The event is not yet connected with its
662          * siblings, therefore we must first collect
663          * the existing siblings and then add the new
664          * event before we can simulate the scheduling.
665          */
666         n = uncore_collect_events(fake_box, leader, true);
667         if (n < 0)
668                 goto out;
669
670         fake_box->n_events = n;
671         n = uncore_collect_events(fake_box, event, false);
672         if (n < 0)
673                 goto out;
674
675         fake_box->n_events = n;
676
677         ret = uncore_assign_events(fake_box, NULL, n);
678 out:
679         kfree(fake_box);
680         return ret;
681 }
682
683 static int uncore_pmu_event_init(struct perf_event *event)
684 {
685         struct intel_uncore_pmu *pmu;
686         struct intel_uncore_box *box;
687         struct hw_perf_event *hwc = &event->hw;
688         int ret;
689
690         if (event->attr.type != event->pmu->type)
691                 return -ENOENT;
692
693         pmu = uncore_event_to_pmu(event);
694         /* no device found for this pmu */
695         if (pmu->func_id < 0)
696                 return -ENOENT;
697
698         /* Sampling not supported yet */
699         if (hwc->sample_period)
700                 return -EINVAL;
701
702         /*
703          * Place all uncore events for a particular physical package
704          * onto a single cpu
705          */
706         if (event->cpu < 0)
707                 return -EINVAL;
708         box = uncore_pmu_to_box(pmu, event->cpu);
709         if (!box || box->cpu < 0)
710                 return -EINVAL;
711         event->cpu = box->cpu;
712         event->pmu_private = box;
713
714         event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;
715
716         event->hw.idx = -1;
717         event->hw.last_tag = ~0ULL;
718         event->hw.extra_reg.idx = EXTRA_REG_NONE;
719         event->hw.branch_reg.idx = EXTRA_REG_NONE;
720
721         if (event->attr.config == UNCORE_FIXED_EVENT) {
722                 /* no fixed counter */
723                 if (!pmu->type->fixed_ctl)
724                         return -EINVAL;
725                 /*
726                  * if there is only one fixed counter, only the first pmu
727                  * can access the fixed counter
728                  */
729                 if (pmu->type->single_fixed && pmu->pmu_idx > 0)
730                         return -EINVAL;
731
732                 /* fixed counters have event field hardcoded to zero */
733                 hwc->config = 0ULL;
734         } else if (is_freerunning_event(event)) {
735                 hwc->config = event->attr.config;
736                 if (!check_valid_freerunning_event(box, event))
737                         return -EINVAL;
738                 event->hw.idx = UNCORE_PMC_IDX_FREERUNNING;
739                 /*
740                  * The free running counter event and free running counter
741                  * are always 1:1 mapped.
742                  * The free running counter is always active.
743                  * Assign the free running counter here.
744                  */
745                 event->hw.event_base = uncore_freerunning_counter(box, event);
746         } else {
747                 hwc->config = event->attr.config &
748                               (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32));
749                 if (pmu->type->ops->hw_config) {
750                         ret = pmu->type->ops->hw_config(box, event);
751                         if (ret)
752                                 return ret;
753                 }
754         }
755
756         if (event->group_leader != event)
757                 ret = uncore_validate_group(pmu, event);
758         else
759                 ret = 0;
760
761         return ret;
762 }
763
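/*
 * The "cpumask" sysfs attribute exposes the CPUs that collect uncore
 * events (one per package). Tools such as perf read it so that, for
 * example, "perf stat -a -e uncore_<type>/<event>/" programs the counters
 * only on those CPUs; "uncore_<type>" here is just an illustrative name.
 */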
764 static ssize_t uncore_get_attr_cpumask(struct device *dev,
765                                 struct device_attribute *attr, char *buf)
766 {
767         return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask);
768 }
769
770 static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);
771
772 static struct attribute *uncore_pmu_attrs[] = {
773         &dev_attr_cpumask.attr,
774         NULL,
775 };
776
777 static const struct attribute_group uncore_pmu_attr_group = {
778         .attrs = uncore_pmu_attrs,
779 };
780
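/*
 * Register one PMU instance with the perf core. Single-box types show up
 * as "uncore_<type>" (or plain "uncore"), multi-box types get a per-box
 * index suffix ("uncore_<type>_<idx>"); the resulting PMUs are visible
 * under /sys/bus/event_source/devices/.
 */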
781 static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
782 {
783         int ret;
784
785         if (!pmu->type->pmu) {
786                 pmu->pmu = (struct pmu) {
787                         .attr_groups    = pmu->type->attr_groups,
788                         .task_ctx_nr    = perf_invalid_context,
789                         .event_init     = uncore_pmu_event_init,
790                         .add            = uncore_pmu_event_add,
791                         .del            = uncore_pmu_event_del,
792                         .start          = uncore_pmu_event_start,
793                         .stop           = uncore_pmu_event_stop,
794                         .read           = uncore_pmu_event_read,
795                         .module         = THIS_MODULE,
796                         .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
797                 };
798         } else {
799                 pmu->pmu = *pmu->type->pmu;
800                 pmu->pmu.attr_groups = pmu->type->attr_groups;
801         }
802
803         if (pmu->type->num_boxes == 1) {
804                 if (strlen(pmu->type->name) > 0)
805                         sprintf(pmu->name, "uncore_%s", pmu->type->name);
806                 else
807                         sprintf(pmu->name, "uncore");
808         } else {
809                 sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
810                         pmu->pmu_idx);
811         }
812
813         ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
814         if (!ret)
815                 pmu->registered = true;
816         return ret;
817 }
818
819 static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
820 {
821         if (!pmu->registered)
822                 return;
823         perf_pmu_unregister(&pmu->pmu);
824         pmu->registered = false;
825 }
826
827 static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
828 {
829         int pkg;
830
831         for (pkg = 0; pkg < max_packages; pkg++)
832                 kfree(pmu->boxes[pkg]);
833         kfree(pmu->boxes);
834 }
835
836 static void uncore_type_exit(struct intel_uncore_type *type)
837 {
838         struct intel_uncore_pmu *pmu = type->pmus;
839         int i;
840
841         if (pmu) {
842                 for (i = 0; i < type->num_boxes; i++, pmu++) {
843                         uncore_pmu_unregister(pmu);
844                         uncore_free_boxes(pmu);
845                 }
846                 kfree(type->pmus);
847                 type->pmus = NULL;
848         }
849         kfree(type->events_group);
850         type->events_group = NULL;
851 }
852
853 static void uncore_types_exit(struct intel_uncore_type **types)
854 {
855         for (; *types; types++)
856                 uncore_type_exit(*types);
857 }
858
859 static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
860 {
861         struct intel_uncore_pmu *pmus;
862         size_t size;
863         int i, j;
864
865         pmus = kcalloc(type->num_boxes, sizeof(*pmus), GFP_KERNEL);
866         if (!pmus)
867                 return -ENOMEM;
868
869         size = max_packages * sizeof(struct intel_uncore_box *);
870
871         for (i = 0; i < type->num_boxes; i++) {
872                 pmus[i].func_id = setid ? i : -1;
873                 pmus[i].pmu_idx = i;
874                 pmus[i].type    = type;
875                 pmus[i].boxes   = kzalloc(size, GFP_KERNEL);
876                 if (!pmus[i].boxes)
877                         goto err;
878         }
879
880         type->pmus = pmus;
881         type->unconstrainted = (struct event_constraint)
882                 __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
883                                 0, type->num_counters, 0, 0);
884
885         if (type->event_descs) {
886                 struct {
887                         struct attribute_group group;
888                         struct attribute *attrs[];
889                 } *attr_group;
890                 for (i = 0; type->event_descs[i].attr.attr.name; i++);
891
892                 attr_group = kzalloc(struct_size(attr_group, attrs, i + 1),
893                                                                 GFP_KERNEL);
894                 if (!attr_group)
895                         goto err;
896
897                 attr_group->group.name = "events";
898                 attr_group->group.attrs = attr_group->attrs;
899
900                 for (j = 0; j < i; j++)
901                         attr_group->attrs[j] = &type->event_descs[j].attr.attr;
902
903                 type->events_group = &attr_group->group;
904         }
905
906         type->pmu_group = &uncore_pmu_attr_group;
907
908         return 0;
909
910 err:
911         for (i = 0; i < type->num_boxes; i++)
912                 kfree(pmus[i].boxes);
913         kfree(pmus);
914
915         return -ENOMEM;
916 }
917
918 static int __init
919 uncore_types_init(struct intel_uncore_type **types, bool setid)
920 {
921         int ret;
922
923         for (; *types; types++) {
924                 ret = uncore_type_init(*types, setid);
925                 if (ret)
926                         return ret;
927         }
928         return 0;
929 }
930
931 /*
932  * add a pci uncore device
933  */
934 static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
935 {
936         struct intel_uncore_type *type;
937         struct intel_uncore_pmu *pmu = NULL;
938         struct intel_uncore_box *box;
939         int phys_id, pkg, ret;
940
941         phys_id = uncore_pcibus_to_physid(pdev->bus);
942         if (phys_id < 0)
943                 return -ENODEV;
944
945         pkg = topology_phys_to_logical_pkg(phys_id);
946         if (pkg < 0)
947                 return -EINVAL;
948
949         if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
950                 int idx = UNCORE_PCI_DEV_IDX(id->driver_data);
951
952                 uncore_extra_pci_dev[pkg].dev[idx] = pdev;
953                 pci_set_drvdata(pdev, NULL);
954                 return 0;
955         }
956
957         type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
958
959         /*
960          * Some platforms, e.g. Knights Landing, use a common PCI device ID
961          * for multiple instances of an uncore PMU device type. In that case
962          * the PCI slot and function are checked to identify the uncore box.
963          */
964         if (id->driver_data & ~0xffff) {
965                 struct pci_driver *pci_drv = pdev->driver;
966                 const struct pci_device_id *ids = pci_drv->id_table;
967                 unsigned int devfn;
968
969                 while (ids && ids->vendor) {
970                         if ((ids->vendor == pdev->vendor) &&
971                             (ids->device == pdev->device)) {
972                                 devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(ids->driver_data),
973                                                   UNCORE_PCI_DEV_FUNC(ids->driver_data));
974                                 if (devfn == pdev->devfn) {
975                                         pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)];
976                                         break;
977                                 }
978                         }
979                         ids++;
980                 }
981                 if (pmu == NULL)
982                         return -ENODEV;
983         } else {
984                 /*
985                  * For a performance monitoring unit with multiple boxes,
986                  * each box has a different function ID.
987                  */
988                 pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
989         }
990
991         if (WARN_ON_ONCE(pmu->boxes[pkg] != NULL))
992                 return -EINVAL;
993
994         box = uncore_alloc_box(type, NUMA_NO_NODE);
995         if (!box)
996                 return -ENOMEM;
997
998         if (pmu->func_id < 0)
999                 pmu->func_id = pdev->devfn;
1000         else
1001                 WARN_ON_ONCE(pmu->func_id != pdev->devfn);
1002
1003         atomic_inc(&box->refcnt);
1004         box->pci_phys_id = phys_id;
1005         box->pkgid = pkg;
1006         box->pci_dev = pdev;
1007         box->pmu = pmu;
1008         uncore_box_init(box);
1009         pci_set_drvdata(pdev, box);
1010
1011         pmu->boxes[pkg] = box;
1012         if (atomic_inc_return(&pmu->activeboxes) > 1)
1013                 return 0;
1014
1015         /* First active box registers the pmu */
1016         ret = uncore_pmu_register(pmu);
1017         if (ret) {
1018                 pci_set_drvdata(pdev, NULL);
1019                 pmu->boxes[pkg] = NULL;
1020                 uncore_box_exit(box);
1021                 kfree(box);
1022         }
1023         return ret;
1024 }
1025
1026 static void uncore_pci_remove(struct pci_dev *pdev)
1027 {
1028         struct intel_uncore_box *box;
1029         struct intel_uncore_pmu *pmu;
1030         int i, phys_id, pkg;
1031
1032         phys_id = uncore_pcibus_to_physid(pdev->bus);
1033
1034         box = pci_get_drvdata(pdev);
1035         if (!box) {
1036                 pkg = topology_phys_to_logical_pkg(phys_id);
1037                 for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
1038                         if (uncore_extra_pci_dev[pkg].dev[i] == pdev) {
1039                                 uncore_extra_pci_dev[pkg].dev[i] = NULL;
1040                                 break;
1041                         }
1042                 }
1043                 WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
1044                 return;
1045         }
1046
1047         pmu = box->pmu;
1048         if (WARN_ON_ONCE(phys_id != box->pci_phys_id))
1049                 return;
1050
1051         pci_set_drvdata(pdev, NULL);
1052         pmu->boxes[box->pkgid] = NULL;
1053         if (atomic_dec_return(&pmu->activeboxes) == 0)
1054                 uncore_pmu_unregister(pmu);
1055         uncore_box_exit(box);
1056         kfree(box);
1057 }
1058
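/*
 * Set up the PCI side of the driver: allocate the per-package extra-device
 * array, initialize the PCI uncore types and register the PCI driver whose
 * probe/remove callbacks create and tear down the boxes.
 */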
1059 static int __init uncore_pci_init(void)
1060 {
1061         size_t size;
1062         int ret;
1063
1064         size = max_packages * sizeof(struct pci_extra_dev);
1065         uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL);
1066         if (!uncore_extra_pci_dev) {
1067                 ret = -ENOMEM;
1068                 goto err;
1069         }
1070
1071         ret = uncore_types_init(uncore_pci_uncores, false);
1072         if (ret)
1073                 goto errtype;
1074
1075         uncore_pci_driver->probe = uncore_pci_probe;
1076         uncore_pci_driver->remove = uncore_pci_remove;
1077
1078         ret = pci_register_driver(uncore_pci_driver);
1079         if (ret)
1080                 goto errtype;
1081
1082         pcidrv_registered = true;
1083         return 0;
1084
1085 errtype:
1086         uncore_types_exit(uncore_pci_uncores);
1087         kfree(uncore_extra_pci_dev);
1088         uncore_extra_pci_dev = NULL;
1089         uncore_free_pcibus_map();
1090 err:
1091         uncore_pci_uncores = empty_uncore;
1092         return ret;
1093 }
1094
1095 static void uncore_pci_exit(void)
1096 {
1097         if (pcidrv_registered) {
1098                 pcidrv_registered = false;
1099                 pci_unregister_driver(uncore_pci_driver);
1100                 uncore_types_exit(uncore_pci_uncores);
1101                 kfree(uncore_extra_pci_dev);
1102                 uncore_free_pcibus_map();
1103         }
1104 }
1105
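/*
 * Move the uncore context of one type from old_cpu to new_cpu within a
 * package: either claim the boxes for a newly designated CPU (old_cpu < 0),
 * release them (new_cpu < 0), or migrate the running perf context between
 * the two CPUs.
 */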
1106 static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
1107                                    int new_cpu)
1108 {
1109         struct intel_uncore_pmu *pmu = type->pmus;
1110         struct intel_uncore_box *box;
1111         int i, pkg;
1112
1113         pkg = topology_logical_package_id(old_cpu < 0 ? new_cpu : old_cpu);
1114         for (i = 0; i < type->num_boxes; i++, pmu++) {
1115                 box = pmu->boxes[pkg];
1116                 if (!box)
1117                         continue;
1118
1119                 if (old_cpu < 0) {
1120                         WARN_ON_ONCE(box->cpu != -1);
1121                         box->cpu = new_cpu;
1122                         continue;
1123                 }
1124
1125                 WARN_ON_ONCE(box->cpu != old_cpu);
1126                 box->cpu = -1;
1127                 if (new_cpu < 0)
1128                         continue;
1129
1130                 uncore_pmu_cancel_hrtimer(box);
1131                 perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
1132                 box->cpu = new_cpu;
1133         }
1134 }
1135
1136 static void uncore_change_context(struct intel_uncore_type **uncores,
1137                                   int old_cpu, int new_cpu)
1138 {
1139         for (; *uncores; uncores++)
1140                 uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
1141 }
1142
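/*
 * CPU hotplug callback for an outgoing CPU: if it was the designated event
 * collector for its package, pick another online CPU in the same package
 * and migrate the uncore contexts there; then drop this CPU's reference on
 * the package's boxes and exit boxes whose refcount reaches zero.
 */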
1143 static int uncore_event_cpu_offline(unsigned int cpu)
1144 {
1145         struct intel_uncore_type *type, **types = uncore_msr_uncores;
1146         struct intel_uncore_pmu *pmu;
1147         struct intel_uncore_box *box;
1148         int i, pkg, target;
1149
1150         /* Check if the exiting CPU is used for collecting uncore events */
1151         if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
1152                 goto unref;
1153         /* Find a new cpu to collect uncore events */
1154         target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
1155
1156         /* Migrate uncore events to the new target */
1157         if (target < nr_cpu_ids)
1158                 cpumask_set_cpu(target, &uncore_cpu_mask);
1159         else
1160                 target = -1;
1161
1162         uncore_change_context(uncore_msr_uncores, cpu, target);
1163         uncore_change_context(uncore_pci_uncores, cpu, target);
1164
1165 unref:
1166         /* Clear the references */
1167         pkg = topology_logical_package_id(cpu);
1168         for (; *types; types++) {
1169                 type = *types;
1170                 pmu = type->pmus;
1171                 for (i = 0; i < type->num_boxes; i++, pmu++) {
1172                         box = pmu->boxes[pkg];
1173                         if (box && atomic_dec_return(&box->refcnt) == 0)
1174                                 uncore_box_exit(box);
1175                 }
1176         }
1177         return 0;
1178 }
1179
1180 static int allocate_boxes(struct intel_uncore_type **types,
1181                          unsigned int pkg, unsigned int cpu)
1182 {
1183         struct intel_uncore_box *box, *tmp;
1184         struct intel_uncore_type *type;
1185         struct intel_uncore_pmu *pmu;
1186         LIST_HEAD(allocated);
1187         int i;
1188
1189         /* Try to allocate all required boxes */
1190         for (; *types; types++) {
1191                 type = *types;
1192                 pmu = type->pmus;
1193                 for (i = 0; i < type->num_boxes; i++, pmu++) {
1194                         if (pmu->boxes[pkg])
1195                                 continue;
1196                         box = uncore_alloc_box(type, cpu_to_node(cpu));
1197                         if (!box)
1198                                 goto cleanup;
1199                         box->pmu = pmu;
1200                         box->pkgid = pkg;
1201                         list_add(&box->active_list, &allocated);
1202                 }
1203         }
1204         /* Install them in the pmus */
1205         list_for_each_entry_safe(box, tmp, &allocated, active_list) {
1206                 list_del_init(&box->active_list);
1207                 box->pmu->boxes[pkg] = box;
1208         }
1209         return 0;
1210
1211 cleanup:
1212         list_for_each_entry_safe(box, tmp, &allocated, active_list) {
1213                 list_del_init(&box->active_list);
1214                 kfree(box);
1215         }
1216         return -ENOMEM;
1217 }
1218
1219 static int uncore_event_cpu_online(unsigned int cpu)
1220 {
1221         struct intel_uncore_type *type, **types = uncore_msr_uncores;
1222         struct intel_uncore_pmu *pmu;
1223         struct intel_uncore_box *box;
1224         int i, ret, pkg, target;
1225
1226         pkg = topology_logical_package_id(cpu);
1227         ret = allocate_boxes(types, pkg, cpu);
1228         if (ret)
1229                 return ret;
1230
1231         for (; *types; types++) {
1232                 type = *types;
1233                 pmu = type->pmus;
1234                 for (i = 0; i < type->num_boxes; i++, pmu++) {
1235                         box = pmu->boxes[pkg];
1236                         if (box && atomic_inc_return(&box->refcnt) == 1)
1237                                 uncore_box_init(box);
1238                 }
1239         }
1240
1241         /*
1242          * Check if there is an online cpu in the package
1243          * which collects uncore events already.
1244          */
1245         target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu));
1246         if (target < nr_cpu_ids)
1247                 return 0;
1248
1249         cpumask_set_cpu(cpu, &uncore_cpu_mask);
1250
1251         uncore_change_context(uncore_msr_uncores, -1, cpu);
1252         uncore_change_context(uncore_pci_uncores, -1, cpu);
1253         return 0;
1254 }
1255
1256 static int __init type_pmu_register(struct intel_uncore_type *type)
1257 {
1258         int i, ret;
1259
1260         for (i = 0; i < type->num_boxes; i++) {
1261                 ret = uncore_pmu_register(&type->pmus[i]);
1262                 if (ret)
1263                         return ret;
1264         }
1265         return 0;
1266 }
1267
1268 static int __init uncore_msr_pmus_register(void)
1269 {
1270         struct intel_uncore_type **types = uncore_msr_uncores;
1271         int ret;
1272
1273         for (; *types; types++) {
1274                 ret = type_pmu_register(*types);
1275                 if (ret)
1276                         return ret;
1277         }
1278         return 0;
1279 }
1280
1281 static int __init uncore_cpu_init(void)
1282 {
1283         int ret;
1284
1285         ret = uncore_types_init(uncore_msr_uncores, true);
1286         if (ret)
1287                 goto err;
1288
1289         ret = uncore_msr_pmus_register();
1290         if (ret)
1291                 goto err;
1292         return 0;
1293 err:
1294         uncore_types_exit(uncore_msr_uncores);
1295         uncore_msr_uncores = empty_uncore;
1296         return ret;
1297 }
1298
1299 #define X86_UNCORE_MODEL_MATCH(model, init)     \
1300         { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }
1301
1302 struct intel_uncore_init_fun {
1303         void    (*cpu_init)(void);
1304         int     (*pci_init)(void);
1305 };
1306
1307 static const struct intel_uncore_init_fun nhm_uncore_init __initconst = {
1308         .cpu_init = nhm_uncore_cpu_init,
1309 };
1310
1311 static const struct intel_uncore_init_fun snb_uncore_init __initconst = {
1312         .cpu_init = snb_uncore_cpu_init,
1313         .pci_init = snb_uncore_pci_init,
1314 };
1315
1316 static const struct intel_uncore_init_fun ivb_uncore_init __initconst = {
1317         .cpu_init = snb_uncore_cpu_init,
1318         .pci_init = ivb_uncore_pci_init,
1319 };
1320
1321 static const struct intel_uncore_init_fun hsw_uncore_init __initconst = {
1322         .cpu_init = snb_uncore_cpu_init,
1323         .pci_init = hsw_uncore_pci_init,
1324 };
1325
1326 static const struct intel_uncore_init_fun bdw_uncore_init __initconst = {
1327         .cpu_init = snb_uncore_cpu_init,
1328         .pci_init = bdw_uncore_pci_init,
1329 };
1330
1331 static const struct intel_uncore_init_fun snbep_uncore_init __initconst = {
1332         .cpu_init = snbep_uncore_cpu_init,
1333         .pci_init = snbep_uncore_pci_init,
1334 };
1335
1336 static const struct intel_uncore_init_fun nhmex_uncore_init __initconst = {
1337         .cpu_init = nhmex_uncore_cpu_init,
1338 };
1339
1340 static const struct intel_uncore_init_fun ivbep_uncore_init __initconst = {
1341         .cpu_init = ivbep_uncore_cpu_init,
1342         .pci_init = ivbep_uncore_pci_init,
1343 };
1344
1345 static const struct intel_uncore_init_fun hswep_uncore_init __initconst = {
1346         .cpu_init = hswep_uncore_cpu_init,
1347         .pci_init = hswep_uncore_pci_init,
1348 };
1349
1350 static const struct intel_uncore_init_fun bdx_uncore_init __initconst = {
1351         .cpu_init = bdx_uncore_cpu_init,
1352         .pci_init = bdx_uncore_pci_init,
1353 };
1354
1355 static const struct intel_uncore_init_fun knl_uncore_init __initconst = {
1356         .cpu_init = knl_uncore_cpu_init,
1357         .pci_init = knl_uncore_pci_init,
1358 };
1359
1360 static const struct intel_uncore_init_fun skl_uncore_init __initconst = {
1361         .cpu_init = skl_uncore_cpu_init,
1362         .pci_init = skl_uncore_pci_init,
1363 };
1364
1365 static const struct intel_uncore_init_fun skx_uncore_init __initconst = {
1366         .cpu_init = skx_uncore_cpu_init,
1367         .pci_init = skx_uncore_pci_init,
1368 };
1369
1370 static const struct intel_uncore_init_fun icl_uncore_init __initconst = {
1371         .cpu_init = icl_uncore_cpu_init,
1372         .pci_init = skl_uncore_pci_init,
1373 };
1374
1375 static const struct x86_cpu_id intel_uncore_match[] __initconst = {
1376         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EP,     nhm_uncore_init),
1377         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM,        nhm_uncore_init),
1378         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE,       nhm_uncore_init),
1379         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EP,    nhm_uncore_init),
1380         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE,    snb_uncore_init),
1381         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE,      ivb_uncore_init),
1382         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE,   hsw_uncore_init),
1383         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT,    hsw_uncore_init),
1384         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E,   hsw_uncore_init),
1385         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE, bdw_uncore_init),
1386         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, bdw_uncore_init),
1387         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X,  snbep_uncore_init),
1388         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EX,     nhmex_uncore_init),
1389         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EX,    nhmex_uncore_init),
1390         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X,    ivbep_uncore_init),
1391         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_X,      hswep_uncore_init),
1392         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_X,    bdx_uncore_init),
1393         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, bdx_uncore_init),
1394         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL,   knl_uncore_init),
1395         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM,   knl_uncore_init),
1396         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP,skl_uncore_init),
1397         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, skl_uncore_init),
1398         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X,      skx_uncore_init),
1399         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE, skl_uncore_init),
1400         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_uncore_init),
1401         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE, icl_uncore_init),
1402         {},
1403 };
1404
1405 MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);
1406
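/*
 * Module init: match the running CPU against intel_uncore_match[], bail out
 * under a hypervisor (uncore PMUs are generally not exposed to guests),
 * initialize the PCI and MSR uncore types provided by the model specific
 * init functions, and install the CPU hotplug callbacks that designate one
 * collecting CPU per package.
 */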
1407 static int __init intel_uncore_init(void)
1408 {
1409         const struct x86_cpu_id *id;
1410         struct intel_uncore_init_fun *uncore_init;
1411         int pret = 0, cret = 0, ret;
1412
1413         id = x86_match_cpu(intel_uncore_match);
1414         if (!id)
1415                 return -ENODEV;
1416
1417         if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
1418                 return -ENODEV;
1419
1420         max_packages = topology_max_packages();
1421
1422         uncore_init = (struct intel_uncore_init_fun *)id->driver_data;
1423         if (uncore_init->pci_init) {
1424                 pret = uncore_init->pci_init();
1425                 if (!pret)
1426                         pret = uncore_pci_init();
1427         }
1428
1429         if (uncore_init->cpu_init) {
1430                 uncore_init->cpu_init();
1431                 cret = uncore_cpu_init();
1432         }
1433
1434         if (cret && pret)
1435                 return -ENODEV;
1436
1437         /* Install hotplug callbacks to set up the targets for each package */
1438         ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
1439                                 "perf/x86/intel/uncore:online",
1440                                 uncore_event_cpu_online,
1441                                 uncore_event_cpu_offline);
1442         if (ret)
1443                 goto err;
1444         return 0;
1445
1446 err:
1447         uncore_types_exit(uncore_msr_uncores);
1448         uncore_pci_exit();
1449         return ret;
1450 }
1451 module_init(intel_uncore_init);
1452
1453 static void __exit intel_uncore_exit(void)
1454 {
1455         cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
1456         uncore_types_exit(uncore_msr_uncores);
1457         uncore_pci_exit();
1458 }
1459 module_exit(intel_uncore_exit);