arch/x86/events/core.c
1 /*
2  * Performance events x86 architecture code
3  *
4  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6  *  Copyright (C) 2009 Jaswinder Singh Rajput
7  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8  *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
9  *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
10  *  Copyright (C) 2009 Google, Inc., Stephane Eranian
11  *
12  *  For licencing details see kernel-base/COPYING
13  */
14
15 #include <linux/perf_event.h>
16 #include <linux/capability.h>
17 #include <linux/notifier.h>
18 #include <linux/hardirq.h>
19 #include <linux/kprobes.h>
20 #include <linux/export.h>
21 #include <linux/init.h>
22 #include <linux/kdebug.h>
23 #include <linux/sched.h>
24 #include <linux/uaccess.h>
25 #include <linux/slab.h>
26 #include <linux/cpu.h>
27 #include <linux/bitops.h>
28 #include <linux/device.h>
29
30 #include <asm/apic.h>
31 #include <asm/stacktrace.h>
32 #include <asm/nmi.h>
33 #include <asm/smp.h>
34 #include <asm/alternative.h>
35 #include <asm/mmu_context.h>
36 #include <asm/tlbflush.h>
37 #include <asm/timer.h>
38 #include <asm/desc.h>
39 #include <asm/ldt.h>
40
41 #include "perf_event.h"
42
43 struct x86_pmu x86_pmu __read_mostly;
44
45 DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
46         .enabled = 1,
47 };
48
49 struct static_key rdpmc_always_available = STATIC_KEY_INIT_FALSE;
50
51 u64 __read_mostly hw_cache_event_ids
52                                 [PERF_COUNT_HW_CACHE_MAX]
53                                 [PERF_COUNT_HW_CACHE_OP_MAX]
54                                 [PERF_COUNT_HW_CACHE_RESULT_MAX];
55 u64 __read_mostly hw_cache_extra_regs
56                                 [PERF_COUNT_HW_CACHE_MAX]
57                                 [PERF_COUNT_HW_CACHE_OP_MAX]
58                                 [PERF_COUNT_HW_CACHE_RESULT_MAX];
59
60 /*
61  * Propagate event elapsed time into the generic event.
62  * Can only be executed on the CPU where the event is active.
63  * Returns the new raw count.
64  */
65 u64 x86_perf_event_update(struct perf_event *event)
66 {
67         struct hw_perf_event *hwc = &event->hw;
68         int shift = 64 - x86_pmu.cntval_bits;
69         u64 prev_raw_count, new_raw_count;
70         int idx = hwc->idx;
71         s64 delta;
72
73         if (idx == INTEL_PMC_IDX_FIXED_BTS)
74                 return 0;
75
76         /*
77          * Careful: an NMI might modify the previous event value.
78          *
79          * Our tactic to handle this is to first atomically read and
80          * exchange a new raw count - then add that new-prev delta
81          * count to the generic event atomically:
82          */
83 again:
84         prev_raw_count = local64_read(&hwc->prev_count);
85         rdpmcl(hwc->event_base_rdpmc, new_raw_count);
86
87         if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
88                                         new_raw_count) != prev_raw_count)
89                 goto again;
90
91         /*
92          * Now we have the new raw value and have updated the prev
93          * timestamp already. We can now calculate the elapsed delta
94          * (event-)time and add that to the generic event.
95          *
96          * Careful, not all hw sign-extends above the physical width
97          * of the count.
98          */
99         delta = (new_raw_count << shift) - (prev_raw_count << shift);
100         delta >>= shift;
101
102         local64_add(delta, &event->count);
103         local64_sub(delta, &hwc->period_left);
104
105         return new_raw_count;
106 }
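/*
 * Editor's note: a minimal worked example of the shift trick above, assuming
 * a hypothetical 48-bit counter (cntval_bits = 48, so shift = 16):
 *
 *      prev_raw_count = 0x0000fffffffffff0   (counter about to wrap)
 *      new_raw_count  = 0x0000000000000010   (counter has wrapped)
 *
 *      (new << 16) - (prev << 16) = 0x200000 (64-bit subtraction wraps)
 *      delta = 0x200000 >> 16     = 0x20     (32 events, the true delta)
 *
 * Shifting both values by (64 - cntval_bits) before subtracting makes the
 * 64-bit subtraction wrap exactly like the narrower hardware counter, so the
 * arithmetic right shift recovers the correct delta.
 */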
107
108 /*
109  * Find and validate any extra registers to set up.
110  */
111 static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
112 {
113         struct hw_perf_event_extra *reg;
114         struct extra_reg *er;
115
116         reg = &event->hw.extra_reg;
117
118         if (!x86_pmu.extra_regs)
119                 return 0;
120
121         for (er = x86_pmu.extra_regs; er->msr; er++) {
122                 if (er->event != (config & er->config_mask))
123                         continue;
124                 if (event->attr.config1 & ~er->valid_mask)
125                         return -EINVAL;
126                 /* Check if the extra MSRs can be safely accessed */
127                 if (!er->extra_msr_access)
128                         return -ENXIO;
129
130                 reg->idx = er->idx;
131                 reg->config = event->attr.config1;
132                 reg->reg = er->msr;
133                 break;
134         }
135         return 0;
136 }
137
138 static atomic_t active_events;
139 static atomic_t pmc_refcount;
140 static DEFINE_MUTEX(pmc_reserve_mutex);
141
142 #ifdef CONFIG_X86_LOCAL_APIC
143
144 static bool reserve_pmc_hardware(void)
145 {
146         int i;
147
148         for (i = 0; i < x86_pmu.num_counters; i++) {
149                 if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
150                         goto perfctr_fail;
151         }
152
153         for (i = 0; i < x86_pmu.num_counters; i++) {
154                 if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
155                         goto eventsel_fail;
156         }
157
158         return true;
159
160 eventsel_fail:
161         for (i--; i >= 0; i--)
162                 release_evntsel_nmi(x86_pmu_config_addr(i));
163
164         i = x86_pmu.num_counters;
165
166 perfctr_fail:
167         for (i--; i >= 0; i--)
168                 release_perfctr_nmi(x86_pmu_event_addr(i));
169
170         return false;
171 }
172
173 static void release_pmc_hardware(void)
174 {
175         int i;
176
177         for (i = 0; i < x86_pmu.num_counters; i++) {
178                 release_perfctr_nmi(x86_pmu_event_addr(i));
179                 release_evntsel_nmi(x86_pmu_config_addr(i));
180         }
181 }
182
183 #else
184
185 static bool reserve_pmc_hardware(void) { return true; }
186 static void release_pmc_hardware(void) {}
187
188 #endif
189
190 static bool check_hw_exists(void)
191 {
192         u64 val, val_fail, val_new = ~0;
193         int i, reg, reg_fail, ret = 0;
194         int bios_fail = 0;
195         int reg_safe = -1;
196
197         /*
198          * Check whether the BIOS enabled any of the counters; if so,
199          * complain and bail.
200          */
201         for (i = 0; i < x86_pmu.num_counters; i++) {
202                 reg = x86_pmu_config_addr(i);
203                 ret = rdmsrl_safe(reg, &val);
204                 if (ret)
205                         goto msr_fail;
206                 if (val & ARCH_PERFMON_EVENTSEL_ENABLE) {
207                         bios_fail = 1;
208                         val_fail = val;
209                         reg_fail = reg;
210                 } else {
211                         reg_safe = i;
212                 }
213         }
214
215         if (x86_pmu.num_counters_fixed) {
216                 reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
217                 ret = rdmsrl_safe(reg, &val);
218                 if (ret)
219                         goto msr_fail;
220                 for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
221                         if (val & (0x03 << i*4)) {
222                                 bios_fail = 1;
223                                 val_fail = val;
224                                 reg_fail = reg;
225                         }
226                 }
227         }
228
229         /*
230          * If the BIOS enabled all of the counters, no safe counter is left
231          * for the read/write test below, which would then always fail and
232          * leave the tools useless.  Just fail and disable the hardware counters.
233          */
234
235         if (reg_safe == -1) {
236                 reg = reg_safe;
237                 goto msr_fail;
238         }
239
240         /*
241          * Read the current value, change it and read it back to see if it
242          * matches; this is needed to detect certain hardware emulators
243          * (qemu/kvm) that don't trap on the MSR access and always return 0s.
244          */
245         reg = x86_pmu_event_addr(reg_safe);
246         if (rdmsrl_safe(reg, &val))
247                 goto msr_fail;
248         val ^= 0xffffUL;
249         ret = wrmsrl_safe(reg, val);
250         ret |= rdmsrl_safe(reg, &val_new);
251         if (ret || val != val_new)
252                 goto msr_fail;
253
254         /*
255          * We still allow the PMU driver to operate:
256          */
257         if (bios_fail) {
258                 pr_cont("Broken BIOS detected, complain to your hardware vendor.\n");
259                 pr_err(FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n",
260                               reg_fail, val_fail);
261         }
262
263         return true;
264
265 msr_fail:
266         pr_cont("Broken PMU hardware detected, using software events only.\n");
267         printk("%sFailed to access perfctr msr (MSR %x is %Lx)\n",
268                 boot_cpu_has(X86_FEATURE_HYPERVISOR) ? KERN_INFO : KERN_ERR,
269                 reg, val_new);
270
271         return false;
272 }
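/*
 * Editor's note (illustrative): on an emulated PMU that silently ignores
 * counter writes, the read/xor/write/read-back sequence above observes e.g.
 * val == 0, writes 0xffff, and reads back 0 again; val != val_new then takes
 * the msr_fail path and perf falls back to software events.
 */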
273
274 static void hw_perf_event_destroy(struct perf_event *event)
275 {
276         x86_release_hardware();
277         atomic_dec(&active_events);
278 }
279
280 void hw_perf_lbr_event_destroy(struct perf_event *event)
281 {
282         hw_perf_event_destroy(event);
283
284         /* undo the lbr/bts event accounting */
285         x86_del_exclusive(x86_lbr_exclusive_lbr);
286 }
287
288 static inline int x86_pmu_initialized(void)
289 {
290         return x86_pmu.handle_irq != NULL;
291 }
292
293 static inline int
294 set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
295 {
296         struct perf_event_attr *attr = &event->attr;
297         unsigned int cache_type, cache_op, cache_result;
298         u64 config, val;
299
300         config = attr->config;
301
302         cache_type = (config >>  0) & 0xff;
303         if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
304                 return -EINVAL;
305
306         cache_op = (config >>  8) & 0xff;
307         if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
308                 return -EINVAL;
309
310         cache_result = (config >> 16) & 0xff;
311         if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
312                 return -EINVAL;
313
314         val = hw_cache_event_ids[cache_type][cache_op][cache_result];
315
316         if (val == 0)
317                 return -ENOENT;
318
319         if (val == -1)
320                 return -EINVAL;
321
322         hwc->config |= val;
323         attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
324         return x86_pmu_extra_regs(val, event);
325 }
326
327 int x86_reserve_hardware(void)
328 {
329         int err = 0;
330
331         if (!atomic_inc_not_zero(&pmc_refcount)) {
332                 mutex_lock(&pmc_reserve_mutex);
333                 if (atomic_read(&pmc_refcount) == 0) {
334                         if (!reserve_pmc_hardware())
335                                 err = -EBUSY;
336                         else
337                                 reserve_ds_buffers();
338                 }
339                 if (!err)
340                         atomic_inc(&pmc_refcount);
341                 mutex_unlock(&pmc_reserve_mutex);
342         }
343
344         return err;
345 }
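/*
 * Editor's sketch of the refcounting idiom used above, in generic form
 * (do_expensive_reserve() is a hypothetical helper, not kernel API):
 *
 *      int err = 0;
 *
 *      if (!atomic_inc_not_zero(&refcount)) {
 *              mutex_lock(&lock);
 *              if (atomic_read(&refcount) == 0)
 *                      err = do_expensive_reserve();
 *              if (!err)
 *                      atomic_inc(&refcount);
 *              mutex_unlock(&lock);
 *      }
 *
 * atomic_inc_not_zero() is the lock-free fast path once the hardware is
 * already reserved; only the 0 -> 1 transition takes the mutex, so exactly
 * one of several concurrent first users performs the actual reservation.
 */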
346
347 void x86_release_hardware(void)
348 {
349         if (atomic_dec_and_mutex_lock(&pmc_refcount, &pmc_reserve_mutex)) {
350                 release_pmc_hardware();
351                 release_ds_buffers();
352                 mutex_unlock(&pmc_reserve_mutex);
353         }
354 }
355
356 /*
357  * Check whether we can create an event of a certain type (i.e. that no
358  * conflicting events are present).
359  */
360 int x86_add_exclusive(unsigned int what)
361 {
362         int i;
363
364         if (x86_pmu.lbr_pt_coexist)
365                 return 0;
366
367         if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
368                 mutex_lock(&pmc_reserve_mutex);
369                 for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) {
370                         if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
371                                 goto fail_unlock;
372                 }
373                 atomic_inc(&x86_pmu.lbr_exclusive[what]);
374                 mutex_unlock(&pmc_reserve_mutex);
375         }
376
377         atomic_inc(&active_events);
378         return 0;
379
380 fail_unlock:
381         mutex_unlock(&pmc_reserve_mutex);
382         return -EBUSY;
383 }
384
385 void x86_del_exclusive(unsigned int what)
386 {
387         if (x86_pmu.lbr_pt_coexist)
388                 return;
389
390         atomic_dec(&x86_pmu.lbr_exclusive[what]);
391         atomic_dec(&active_events);
392 }
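/*
 * Editor's example: with the accounting above, creating a BTS/LBR event while
 * an Intel PT event holds x86_lbr_exclusive_pt (or vice versa) fails with
 * -EBUSY, unless the CPU sets lbr_pt_coexist, in which case the exclusivity
 * check is skipped entirely.
 */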
393
394 int x86_setup_perfctr(struct perf_event *event)
395 {
396         struct perf_event_attr *attr = &event->attr;
397         struct hw_perf_event *hwc = &event->hw;
398         u64 config;
399
400         if (!is_sampling_event(event)) {
401                 hwc->sample_period = x86_pmu.max_period;
402                 hwc->last_period = hwc->sample_period;
403                 local64_set(&hwc->period_left, hwc->sample_period);
404         }
405
406         if (attr->type == PERF_TYPE_RAW)
407                 return x86_pmu_extra_regs(event->attr.config, event);
408
409         if (attr->type == PERF_TYPE_HW_CACHE)
410                 return set_ext_hw_attr(hwc, event);
411
412         if (attr->config >= x86_pmu.max_events)
413                 return -EINVAL;
414
415         /*
416          * The generic map:
417          */
418         config = x86_pmu.event_map(attr->config);
419
420         if (config == 0)
421                 return -ENOENT;
422
423         if (config == -1LL)
424                 return -EINVAL;
425
426         /*
427          * Branch tracing:
428          */
429         if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
430             !attr->freq && hwc->sample_period == 1) {
431                 /* BTS is not supported by this architecture. */
432                 if (!x86_pmu.bts_active)
433                         return -EOPNOTSUPP;
434
435                 /* BTS is currently only allowed for user-mode. */
436                 if (!attr->exclude_kernel)
437                         return -EOPNOTSUPP;
438
439                 /* disallow bts if conflicting events are present */
440                 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
441                         return -EBUSY;
442
443                 event->destroy = hw_perf_lbr_event_destroy;
444         }
445
446         hwc->config |= config;
447
448         return 0;
449 }
450
451 /*
452  * Check that branch_sample_type is compatible with the
453  * settings needed for precise_ip > 1, which implies
454  * using the LBR to capture ALL taken branches at the
455  * priv levels of the measurement.
456  */
457 static inline int precise_br_compat(struct perf_event *event)
458 {
459         u64 m = event->attr.branch_sample_type;
460         u64 b = 0;
461
462         /* must capture all branches */
463         if (!(m & PERF_SAMPLE_BRANCH_ANY))
464                 return 0;
465
466         m &= PERF_SAMPLE_BRANCH_KERNEL | PERF_SAMPLE_BRANCH_USER;
467
468         if (!event->attr.exclude_user)
469                 b |= PERF_SAMPLE_BRANCH_USER;
470
471         if (!event->attr.exclude_kernel)
472                 b |= PERF_SAMPLE_BRANCH_KERNEL;
473
474         /*
475          * ignore PERF_SAMPLE_BRANCH_HV, not supported on x86
476          */
477
478         return m == b;
479 }
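/*
 * Editor's example (hypothetical attribute values): for an event with
 * precise_ip = 2 and exclude_kernel = 1, the PEBS fixup samples user-level
 * branches only, so a caller-supplied branch_sample_type is compatible iff it
 * asks for PERF_SAMPLE_BRANCH_ANY | PERF_SAMPLE_BRANCH_USER (m == b above).
 * Adding PERF_SAMPLE_BRANCH_KERNEL would make m != b, and the event is then
 * rejected with -EOPNOTSUPP by x86_pmu_hw_config() below.
 */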
480
481 int x86_pmu_hw_config(struct perf_event *event)
482 {
483         if (event->attr.precise_ip) {
484                 int precise = 0;
485
486                 /* Support for constant skid */
487                 if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) {
488                         precise++;
489
490                         /* Support for IP fixup */
491                         if (x86_pmu.lbr_nr || x86_pmu.intel_cap.pebs_format >= 2)
492                                 precise++;
493
494                         if (x86_pmu.pebs_prec_dist)
495                                 precise++;
496                 }
497
498                 if (event->attr.precise_ip > precise)
499                         return -EOPNOTSUPP;
500         }
501         /*
502          * check that PEBS LBR correction does not conflict with
503          * whatever the user is asking with attr->branch_sample_type
504          */
505         if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format < 2) {
506                 u64 *br_type = &event->attr.branch_sample_type;
507
508                 if (has_branch_stack(event)) {
509                         if (!precise_br_compat(event))
510                                 return -EOPNOTSUPP;
511
512                         /* branch_sample_type is compatible */
513
514                 } else {
515                         /*
516                          * The user did not specify branch_sample_type.
517                          *
518                          * For PEBS fixups, we capture all
519                          * the branches at the priv level of the
520                          * event.
521                          */
522                         *br_type = PERF_SAMPLE_BRANCH_ANY;
523
524                         if (!event->attr.exclude_user)
525                                 *br_type |= PERF_SAMPLE_BRANCH_USER;
526
527                         if (!event->attr.exclude_kernel)
528                                 *br_type |= PERF_SAMPLE_BRANCH_KERNEL;
529                 }
530         }
531
532         if (event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK)
533                 event->attach_state |= PERF_ATTACH_TASK_DATA;
534
535         /*
536          * Generate PMC IRQs:
537          * (keep 'enabled' bit clear for now)
538          */
539         event->hw.config = ARCH_PERFMON_EVENTSEL_INT;
540
541         /*
542          * Count user and OS events unless requested not to
543          */
544         if (!event->attr.exclude_user)
545                 event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
546         if (!event->attr.exclude_kernel)
547                 event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;
548
549         if (event->attr.type == PERF_TYPE_RAW)
550                 event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
551
552         if (event->attr.sample_period && x86_pmu.limit_period) {
553                 if (x86_pmu.limit_period(event, event->attr.sample_period) >
554                                 event->attr.sample_period)
555                         return -EINVAL;
556         }
557
558         return x86_setup_perfctr(event);
559 }
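/*
 * Editor's sketch of a caller-side configuration that exercises the
 * precise_ip path above (hypothetical values, not taken from this file):
 *
 *      struct perf_event_attr attr = {
 *              .type           = PERF_TYPE_HARDWARE,
 *              .config         = PERF_COUNT_HW_CPU_CYCLES,
 *              .precise_ip     = 2,    (needs LBR or PEBS format >= 2)
 *              .exclude_kernel = 1,
 *      };
 *
 * With precise_ip > 1 on a CPU whose PEBS format is < 2 and no
 * branch_sample_type given, the code above requests
 * PERF_SAMPLE_BRANCH_ANY | PERF_SAMPLE_BRANCH_USER itself so the PEBS IP
 * fixup can walk the user-level LBR entries.
 */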
560
561 /*
562  * Setup the hardware configuration for a given attr_type
563  */
564 static int __x86_pmu_event_init(struct perf_event *event)
565 {
566         int err;
567
568         if (!x86_pmu_initialized())
569                 return -ENODEV;
570
571         err = x86_reserve_hardware();
572         if (err)
573                 return err;
574
575         atomic_inc(&active_events);
576         event->destroy = hw_perf_event_destroy;
577
578         event->hw.idx = -1;
579         event->hw.last_cpu = -1;
580         event->hw.last_tag = ~0ULL;
581
582         /* mark unused */
583         event->hw.extra_reg.idx = EXTRA_REG_NONE;
584         event->hw.branch_reg.idx = EXTRA_REG_NONE;
585
586         return x86_pmu.hw_config(event);
587 }
588
589 void x86_pmu_disable_all(void)
590 {
591         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
592         int idx;
593
594         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
595                 u64 val;
596
597                 if (!test_bit(idx, cpuc->active_mask))
598                         continue;
599                 rdmsrl(x86_pmu_config_addr(idx), val);
600                 if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
601                         continue;
602                 val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
603                 wrmsrl(x86_pmu_config_addr(idx), val);
604         }
605 }
606
607 /*
608  * A PMI may land after enabled=0, and it could hit either before or
609  * after disable_all.
610  *
611  * If the PMI hits before disable_all, the PMU will be disabled in the NMI
612  * handler. It will not be re-enabled there, because enabled=0. After
613  * handling the NMI, disable_all will be called, which will not change the
614  * state either. If the PMI hits after disable_all, the PMU is already
615  * disabled before entering the NMI handler, and the NMI handler will not
616  * change the state either.
617  *
618  * So either situation is harmless.
619  */
620 static void x86_pmu_disable(struct pmu *pmu)
621 {
622         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
623
624         if (!x86_pmu_initialized())
625                 return;
626
627         if (!cpuc->enabled)
628                 return;
629
630         cpuc->n_added = 0;
631         cpuc->enabled = 0;
632         barrier();
633
634         x86_pmu.disable_all();
635 }
636
637 void x86_pmu_enable_all(int added)
638 {
639         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
640         int idx;
641
642         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
643                 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
644
645                 if (!test_bit(idx, cpuc->active_mask))
646                         continue;
647
648                 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
649         }
650 }
651
652 static struct pmu pmu;
653
654 static inline int is_x86_event(struct perf_event *event)
655 {
656         return event->pmu == &pmu;
657 }
658
659 /*
660  * Event scheduler state:
661  *
662  * Assign events by iterating over all events and counters, taking
663  * events with the least weight first. Keep the current iterator
664  * state in struct sched_state.
665  */
666 struct sched_state {
667         int     weight;
668         int     event;          /* event index */
669         int     counter;        /* counter index */
670         int     unassigned;     /* number of events to be assigned left */
671         int     nr_gp;          /* number of GP counters used */
672         unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
673 };
674
675 /* Total max is X86_PMC_IDX_MAX, but we are O(n!) limited */
676 #define SCHED_STATES_MAX        2
677
678 struct perf_sched {
679         int                     max_weight;
680         int                     max_events;
681         int                     max_gp;
682         int                     saved_states;
683         struct event_constraint **constraints;
684         struct sched_state      state;
685         struct sched_state      saved[SCHED_STATES_MAX];
686 };
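/*
 * Editor's example of why iteration goes from the lowest weight up
 * (hypothetical constraints): with three events of weight 1, 1 and 3, the
 * two weight-1 events are placed first since each fits exactly one counter;
 * the weight-3 event then takes any remaining counter. Starting with the
 * flexible event instead could occupy the only counter a weight-1 event can
 * use and make the schedule fail.
 */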
687
688 /*
689  * Initialize the iterator that runs through all events and counters.
690  */
691 static void perf_sched_init(struct perf_sched *sched, struct event_constraint **constraints,
692                             int num, int wmin, int wmax, int gpmax)
693 {
694         int idx;
695
696         memset(sched, 0, sizeof(*sched));
697         sched->max_events       = num;
698         sched->max_weight       = wmax;
699         sched->max_gp           = gpmax;
700         sched->constraints      = constraints;
701
702         for (idx = 0; idx < num; idx++) {
703                 if (constraints[idx]->weight == wmin)
704                         break;
705         }
706
707         sched->state.event      = idx;          /* start with min weight */
708         sched->state.weight     = wmin;
709         sched->state.unassigned = num;
710 }
711
712 static void perf_sched_save_state(struct perf_sched *sched)
713 {
714         if (WARN_ON_ONCE(sched->saved_states >= SCHED_STATES_MAX))
715                 return;
716
717         sched->saved[sched->saved_states] = sched->state;
718         sched->saved_states++;
719 }
720
721 static bool perf_sched_restore_state(struct perf_sched *sched)
722 {
723         if (!sched->saved_states)
724                 return false;
725
726         sched->saved_states--;
727         sched->state = sched->saved[sched->saved_states];
728
729         /* continue with next counter: */
730         clear_bit(sched->state.counter++, sched->state.used);
731
732         return true;
733 }
734
735 /*
736  * Select a counter for the current event to schedule. Return true on
737  * success.
738  */
739 static bool __perf_sched_find_counter(struct perf_sched *sched)
740 {
741         struct event_constraint *c;
742         int idx;
743
744         if (!sched->state.unassigned)
745                 return false;
746
747         if (sched->state.event >= sched->max_events)
748                 return false;
749
750         c = sched->constraints[sched->state.event];
751         /* Prefer fixed purpose counters */
752         if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) {
753                 idx = INTEL_PMC_IDX_FIXED;
754                 for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) {
755                         if (!__test_and_set_bit(idx, sched->state.used))
756                                 goto done;
757                 }
758         }
759
760         /* Grab the first unused counter starting with idx */
761         idx = sched->state.counter;
762         for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) {
763                 if (!__test_and_set_bit(idx, sched->state.used)) {
764                         if (sched->state.nr_gp++ >= sched->max_gp)
765                                 return false;
766
767                         goto done;
768                 }
769         }
770
771         return false;
772
773 done:
774         sched->state.counter = idx;
775
776         if (c->overlap)
777                 perf_sched_save_state(sched);
778
779         return true;
780 }
781
782 static bool perf_sched_find_counter(struct perf_sched *sched)
783 {
784         while (!__perf_sched_find_counter(sched)) {
785                 if (!perf_sched_restore_state(sched))
786                         return false;
787         }
788
789         return true;
790 }
791
792 /*
793  * Go through all unassigned events and find the next one to schedule.
794  * Take events with the least weight first. Return true on success.
795  */
796 static bool perf_sched_next_event(struct perf_sched *sched)
797 {
798         struct event_constraint *c;
799
800         if (!sched->state.unassigned || !--sched->state.unassigned)
801                 return false;
802
803         do {
804                 /* next event */
805                 sched->state.event++;
806                 if (sched->state.event >= sched->max_events) {
807                         /* next weight */
808                         sched->state.event = 0;
809                         sched->state.weight++;
810                         if (sched->state.weight > sched->max_weight)
811                                 return false;
812                 }
813                 c = sched->constraints[sched->state.event];
814         } while (c->weight != sched->state.weight);
815
816         sched->state.counter = 0;       /* start with first counter */
817
818         return true;
819 }
820
821 /*
822  * Assign a counter for each event.
823  */
824 int perf_assign_events(struct event_constraint **constraints, int n,
825                         int wmin, int wmax, int gpmax, int *assign)
826 {
827         struct perf_sched sched;
828
829         perf_sched_init(&sched, constraints, n, wmin, wmax, gpmax);
830
831         do {
832                 if (!perf_sched_find_counter(&sched))
833                         break;  /* failed */
834                 if (assign)
835                         assign[sched.state.event] = sched.state.counter;
836         } while (perf_sched_next_event(&sched));
837
838         return sched.state.unassigned;
839 }
840 EXPORT_SYMBOL_GPL(perf_assign_events);
841
842 int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
843 {
844         struct event_constraint *c;
845         unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
846         struct perf_event *e;
847         int i, wmin, wmax, unsched = 0;
848         struct hw_perf_event *hwc;
849
850         bitmap_zero(used_mask, X86_PMC_IDX_MAX);
851
852         if (x86_pmu.start_scheduling)
853                 x86_pmu.start_scheduling(cpuc);
854
855         for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
856                 cpuc->event_constraint[i] = NULL;
857                 c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]);
858                 cpuc->event_constraint[i] = c;
859
860                 wmin = min(wmin, c->weight);
861                 wmax = max(wmax, c->weight);
862         }
863
864         /*
865          * fastpath, try to reuse previous register
866          */
867         for (i = 0; i < n; i++) {
868                 hwc = &cpuc->event_list[i]->hw;
869                 c = cpuc->event_constraint[i];
870
871                 /* never assigned */
872                 if (hwc->idx == -1)
873                         break;
874
875                 /* constraint still honored */
876                 if (!test_bit(hwc->idx, c->idxmsk))
877                         break;
878
879                 /* not already used */
880                 if (test_bit(hwc->idx, used_mask))
881                         break;
882
883                 __set_bit(hwc->idx, used_mask);
884                 if (assign)
885                         assign[i] = hwc->idx;
886         }
887
888         /* slow path */
889         if (i != n) {
890                 int gpmax = x86_pmu.num_counters;
891
892                 /*
893                  * Do not allow scheduling of more than half the available
894                  * generic counters.
895                  *
896                  * This helps avoid counter starvation of the sibling thread by
897                  * ensuring that no more than half of the counters can be taken
898                  * for exclusive use. There are no designated counters for the
899                  * limit: any N/2 counters can be used. This helps with events
900                  * that have specific counter constraints.
901                  */
902                 if (is_ht_workaround_enabled() && !cpuc->is_fake &&
903                     READ_ONCE(cpuc->excl_cntrs->exclusive_present))
904                         gpmax /= 2;
905
906                 unsched = perf_assign_events(cpuc->event_constraint, n, wmin,
907                                              wmax, gpmax, assign);
908         }
909
910         /*
911          * In case of success (unsched = 0), mark events as committed,
912          * so we do not put_constraint() in case new events are added
913          * and fail to be scheduled
914          *
915          * We invoke the lower level commit callback to lock the resource
916          *
917          * We do not need to do all of this in case we are called to
918          * validate an event group (assign == NULL)
919          */
920         if (!unsched && assign) {
921                 for (i = 0; i < n; i++) {
922                         e = cpuc->event_list[i];
923                         e->hw.flags |= PERF_X86_EVENT_COMMITTED;
924                         if (x86_pmu.commit_scheduling)
925                                 x86_pmu.commit_scheduling(cpuc, i, assign[i]);
926                 }
927         } else {
928                 for (i = 0; i < n; i++) {
929                         e = cpuc->event_list[i];
930                         /*
931                          * do not put_constraint() on committed events,
932                          * because they are good to go
933                          */
934                         if ((e->hw.flags & PERF_X86_EVENT_COMMITTED))
935                                 continue;
936
937                         /*
938                          * release events that failed scheduling
939                          */
940                         if (x86_pmu.put_event_constraints)
941                                 x86_pmu.put_event_constraints(cpuc, e);
942                 }
943         }
944
945         if (x86_pmu.stop_scheduling)
946                 x86_pmu.stop_scheduling(cpuc);
947
948         return unsched ? -EINVAL : 0;
949 }
950
951 /*
952  * dogrp: true if we must collect sibling events (group)
953  * returns the total number of events, or a negative error code
954  */
955 static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
956 {
957         struct perf_event *event;
958         int n, max_count;
959
960         max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;
961
962         /* current number of events already accepted */
963         n = cpuc->n_events;
964
965         if (is_x86_event(leader)) {
966                 if (n >= max_count)
967                         return -EINVAL;
968                 cpuc->event_list[n] = leader;
969                 n++;
970         }
971         if (!dogrp)
972                 return n;
973
974         list_for_each_entry(event, &leader->sibling_list, group_entry) {
975                 if (!is_x86_event(event) ||
976                     event->state <= PERF_EVENT_STATE_OFF)
977                         continue;
978
979                 if (n >= max_count)
980                         return -EINVAL;
981
982                 cpuc->event_list[n] = event;
983                 n++;
984         }
985         return n;
986 }
987
988 static inline void x86_assign_hw_event(struct perf_event *event,
989                                 struct cpu_hw_events *cpuc, int i)
990 {
991         struct hw_perf_event *hwc = &event->hw;
992
993         hwc->idx = cpuc->assign[i];
994         hwc->last_cpu = smp_processor_id();
995         hwc->last_tag = ++cpuc->tags[i];
996
997         if (hwc->idx == INTEL_PMC_IDX_FIXED_BTS) {
998                 hwc->config_base = 0;
999                 hwc->event_base = 0;
1000         } else if (hwc->idx >= INTEL_PMC_IDX_FIXED) {
1001                 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
1002                 hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - INTEL_PMC_IDX_FIXED);
1003                 hwc->event_base_rdpmc = (hwc->idx - INTEL_PMC_IDX_FIXED) | 1<<30;
1004         } else {
1005                 hwc->config_base = x86_pmu_config_addr(hwc->idx);
1006                 hwc->event_base  = x86_pmu_event_addr(hwc->idx);
1007                 hwc->event_base_rdpmc = x86_pmu_rdpmc_index(hwc->idx);
1008         }
1009 }
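/*
 * Editor's note (illustrative): the rdpmc index computed above encodes the
 * counter class in bit 30 of ECX. General-purpose counter N is read with
 * index N, while fixed counter N uses index N | (1 << 30), e.g. fixed
 * counter 0 -> 0x40000000 and fixed counter 1 -> 0x40000001.
 */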
1010
1011 static inline int match_prev_assignment(struct hw_perf_event *hwc,
1012                                         struct cpu_hw_events *cpuc,
1013                                         int i)
1014 {
1015         return hwc->idx == cpuc->assign[i] &&
1016                 hwc->last_cpu == smp_processor_id() &&
1017                 hwc->last_tag == cpuc->tags[i];
1018 }
1019
1020 static void x86_pmu_start(struct perf_event *event, int flags);
1021
1022 static void x86_pmu_enable(struct pmu *pmu)
1023 {
1024         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1025         struct perf_event *event;
1026         struct hw_perf_event *hwc;
1027         int i, added = cpuc->n_added;
1028
1029         if (!x86_pmu_initialized())
1030                 return;
1031
1032         if (cpuc->enabled)
1033                 return;
1034
1035         if (cpuc->n_added) {
1036                 int n_running = cpuc->n_events - cpuc->n_added;
1037                 /*
1038                  * apply assignment obtained either from
1039                  * hw_perf_group_sched_in() or x86_pmu_enable()
1040                  *
1041                  * step1: save events moving to new counters
1042                  */
1043                 for (i = 0; i < n_running; i++) {
1044                         event = cpuc->event_list[i];
1045                         hwc = &event->hw;
1046
1047                         /*
1048                          * we can avoid reprogramming counter if:
1049                          * - assigned same counter as last time
1050                          * - running on same CPU as last time
1051                          * - no other event has used the counter since
1052                          */
1053                         if (hwc->idx == -1 ||
1054                             match_prev_assignment(hwc, cpuc, i))
1055                                 continue;
1056
1057                         /*
1058                          * Ensure we don't accidentally enable a stopped
1059                          * counter simply because we rescheduled.
1060                          */
1061                         if (hwc->state & PERF_HES_STOPPED)
1062                                 hwc->state |= PERF_HES_ARCH;
1063
1064                         x86_pmu_stop(event, PERF_EF_UPDATE);
1065                 }
1066
1067                 /*
1068                  * step2: reprogram moved events into new counters
1069                  */
1070                 for (i = 0; i < cpuc->n_events; i++) {
1071                         event = cpuc->event_list[i];
1072                         hwc = &event->hw;
1073
1074                         if (!match_prev_assignment(hwc, cpuc, i))
1075                                 x86_assign_hw_event(event, cpuc, i);
1076                         else if (i < n_running)
1077                                 continue;
1078
1079                         if (hwc->state & PERF_HES_ARCH)
1080                                 continue;
1081
1082                         x86_pmu_start(event, PERF_EF_RELOAD);
1083                 }
1084                 cpuc->n_added = 0;
1085                 perf_events_lapic_init();
1086         }
1087
1088         cpuc->enabled = 1;
1089         barrier();
1090
1091         x86_pmu.enable_all(added);
1092 }
1093
1094 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
1095
1096 /*
1097  * Set the next IRQ period, based on the hwc->period_left value.
1098  * To be called with the event disabled in hw:
1099  */
1100 int x86_perf_event_set_period(struct perf_event *event)
1101 {
1102         struct hw_perf_event *hwc = &event->hw;
1103         s64 left = local64_read(&hwc->period_left);
1104         s64 period = hwc->sample_period;
1105         int ret = 0, idx = hwc->idx;
1106
1107         if (idx == INTEL_PMC_IDX_FIXED_BTS)
1108                 return 0;
1109
1110         /*
1111          * If we are way outside a reasonable range then just skip forward:
1112          */
1113         if (unlikely(left <= -period)) {
1114                 left = period;
1115                 local64_set(&hwc->period_left, left);
1116                 hwc->last_period = period;
1117                 ret = 1;
1118         }
1119
1120         if (unlikely(left <= 0)) {
1121                 left += period;
1122                 local64_set(&hwc->period_left, left);
1123                 hwc->last_period = period;
1124                 ret = 1;
1125         }
1126         /*
1127          * Quirk: certain CPUs don't like it if just 1 hw_event is left:
1128          */
1129         if (unlikely(left < 2))
1130                 left = 2;
1131
1132         if (left > x86_pmu.max_period)
1133                 left = x86_pmu.max_period;
1134
1135         if (x86_pmu.limit_period)
1136                 left = x86_pmu.limit_period(event, left);
1137
1138         per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
1139
1140         if (!(hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) ||
1141             local64_read(&hwc->prev_count) != (u64)-left) {
1142                 /*
1143                  * The hw event starts counting from this event offset;
1144                  * mark it to be able to extract future deltas:
1145                  */
1146                 local64_set(&hwc->prev_count, (u64)-left);
1147
1148                 wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
1149         }
1150
1151         /*
1152          * Due to an erratum on certain CPUs we need
1153          * a second write to be sure the register
1154          * is updated properly
1155          */
1156         if (x86_pmu.perfctr_second_write) {
1157                 wrmsrl(hwc->event_base,
1158                         (u64)(-left) & x86_pmu.cntval_mask);
1159         }
1160
1161         perf_event_update_userpage(event);
1162
1163         return ret;
1164 }
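/*
 * Editor's example, assuming hypothetical 48-bit counters and a
 * sample_period of 100000: the counter is programmed with (u64)-100000
 * masked to 48 bits, i.e. 0xfffffffe7960 (2^48 - 100000), so it overflows
 * and raises a PMI after exactly 100000 further increments.
 */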
1165
1166 void x86_pmu_enable_event(struct perf_event *event)
1167 {
1168         if (__this_cpu_read(cpu_hw_events.enabled))
1169                 __x86_pmu_enable_event(&event->hw,
1170                                        ARCH_PERFMON_EVENTSEL_ENABLE);
1171 }
1172
1173 /*
1174  * Add a single event to the PMU.
1175  *
1176  * The event is added to the group of enabled events
1177  * but only if it can be scheduled with existing events.
1178  */
1179 static int x86_pmu_add(struct perf_event *event, int flags)
1180 {
1181         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1182         struct hw_perf_event *hwc;
1183         int assign[X86_PMC_IDX_MAX];
1184         int n, n0, ret;
1185
1186         hwc = &event->hw;
1187
1188         n0 = cpuc->n_events;
1189         ret = n = collect_events(cpuc, event, false);
1190         if (ret < 0)
1191                 goto out;
1192
1193         hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
1194         if (!(flags & PERF_EF_START))
1195                 hwc->state |= PERF_HES_ARCH;
1196
1197         /*
1198          * If group events scheduling transaction was started,
1199          * skip the schedulability test here, it will be performed
1200          * at commit time (->commit_txn) as a whole.
1201          */
1202         if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
1203                 goto done_collect;
1204
1205         ret = x86_pmu.schedule_events(cpuc, n, assign);
1206         if (ret)
1207                 goto out;
1208         /*
1209          * Copy the new assignment now that we know it is possible;
1210          * it will be used by hw_perf_enable().
1211          */
1212         memcpy(cpuc->assign, assign, n*sizeof(int));
1213
1214 done_collect:
1215         /*
1216          * Commit the collect_events() state. See x86_pmu_del() and
1217          * x86_pmu_*_txn().
1218          */
1219         cpuc->n_events = n;
1220         cpuc->n_added += n - n0;
1221         cpuc->n_txn += n - n0;
1222
1223         ret = 0;
1224 out:
1225         return ret;
1226 }
1227
1228 static void x86_pmu_start(struct perf_event *event, int flags)
1229 {
1230         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1231         int idx = event->hw.idx;
1232
1233         if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
1234                 return;
1235
1236         if (WARN_ON_ONCE(idx == -1))
1237                 return;
1238
1239         if (flags & PERF_EF_RELOAD) {
1240                 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
1241                 x86_perf_event_set_period(event);
1242         }
1243
1244         event->hw.state = 0;
1245
1246         cpuc->events[idx] = event;
1247         __set_bit(idx, cpuc->active_mask);
1248         __set_bit(idx, cpuc->running);
1249         x86_pmu.enable(event);
1250         perf_event_update_userpage(event);
1251 }
1252
1253 void perf_event_print_debug(void)
1254 {
1255         u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
1256         u64 pebs, debugctl;
1257         struct cpu_hw_events *cpuc;
1258         unsigned long flags;
1259         int cpu, idx;
1260
1261         if (!x86_pmu.num_counters)
1262                 return;
1263
1264         local_irq_save(flags);
1265
1266         cpu = smp_processor_id();
1267         cpuc = &per_cpu(cpu_hw_events, cpu);
1268
1269         if (x86_pmu.version >= 2) {
1270                 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
1271                 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1272                 rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
1273                 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
1274
1275                 pr_info("\n");
1276                 pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
1277                 pr_info("CPU#%d: status:     %016llx\n", cpu, status);
1278                 pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
1279                 pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
1280                 if (x86_pmu.pebs_constraints) {
1281                         rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
1282                         pr_info("CPU#%d: pebs:       %016llx\n", cpu, pebs);
1283                 }
1284                 if (x86_pmu.lbr_nr) {
1285                         rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
1286                         pr_info("CPU#%d: debugctl:   %016llx\n", cpu, debugctl);
1287                 }
1288         }
1289         pr_info("CPU#%d: active:     %016llx\n", cpu, *(u64 *)cpuc->active_mask);
1290
1291         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1292                 rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
1293                 rdmsrl(x86_pmu_event_addr(idx), pmc_count);
1294
1295                 prev_left = per_cpu(pmc_prev_left[idx], cpu);
1296
1297                 pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
1298                         cpu, idx, pmc_ctrl);
1299                 pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
1300                         cpu, idx, pmc_count);
1301                 pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
1302                         cpu, idx, prev_left);
1303         }
1304         for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
1305                 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1306
1307                 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
1308                         cpu, idx, pmc_count);
1309         }
1310         local_irq_restore(flags);
1311 }
1312
1313 void x86_pmu_stop(struct perf_event *event, int flags)
1314 {
1315         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1316         struct hw_perf_event *hwc = &event->hw;
1317
1318         if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
1319                 x86_pmu.disable(event);
1320                 cpuc->events[hwc->idx] = NULL;
1321                 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
1322                 hwc->state |= PERF_HES_STOPPED;
1323         }
1324
1325         if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
1326                 /*
1327                  * Drain the remaining delta count out of an event
1328                  * that we are disabling:
1329                  */
1330                 x86_perf_event_update(event);
1331                 hwc->state |= PERF_HES_UPTODATE;
1332         }
1333 }
1334
1335 static void x86_pmu_del(struct perf_event *event, int flags)
1336 {
1337         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1338         int i;
1339
1340         /*
1341          * event is descheduled
1342          */
1343         event->hw.flags &= ~PERF_X86_EVENT_COMMITTED;
1344
1345         /*
1346          * If we're called during a txn, we don't need to do anything.
1347          * The events never got scheduled and ->cancel_txn will truncate
1348          * the event_list.
1349          *
1350          * XXX assumes any ->del() called during a TXN will only be on
1351          * an event added during that same TXN.
1352          */
1353         if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
1354                 return;
1355
1356         /*
1357          * Not a TXN, therefore cleanup properly.
1358          */
1359         x86_pmu_stop(event, PERF_EF_UPDATE);
1360
1361         for (i = 0; i < cpuc->n_events; i++) {
1362                 if (event == cpuc->event_list[i])
1363                         break;
1364         }
1365
1366         if (WARN_ON_ONCE(i == cpuc->n_events)) /* called ->del() without ->add() ? */
1367                 return;
1368
1369         /* If we have a newly added event, make sure to decrease n_added. */
1370         if (i >= cpuc->n_events - cpuc->n_added)
1371                 --cpuc->n_added;
1372
1373         if (x86_pmu.put_event_constraints)
1374                 x86_pmu.put_event_constraints(cpuc, event);
1375
1376         /* Delete the array entry. */
1377         while (++i < cpuc->n_events) {
1378                 cpuc->event_list[i-1] = cpuc->event_list[i];
1379                 cpuc->event_constraint[i-1] = cpuc->event_constraint[i];
1380         }
1381         --cpuc->n_events;
1382
1383         perf_event_update_userpage(event);
1384 }
1385
1386 int x86_pmu_handle_irq(struct pt_regs *regs)
1387 {
1388         struct perf_sample_data data;
1389         struct cpu_hw_events *cpuc;
1390         struct perf_event *event;
1391         int idx, handled = 0;
1392         u64 val;
1393
1394         cpuc = this_cpu_ptr(&cpu_hw_events);
1395
1396         /*
1397          * Some chipsets need to unmask the LVTPC in a particular spot
1398          * inside the NMI handler.  As a result, the unmasking was pushed
1399          * into all the NMI handlers.
1400          *
1401          * This generic handler doesn't seem to have any issues with where
1402          * the unmasking occurs, so it was left at the top.
1403          */
1404         apic_write(APIC_LVTPC, APIC_DM_NMI);
1405
1406         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1407                 if (!test_bit(idx, cpuc->active_mask)) {
1408                         /*
1409                          * Though we deactivated the counter, some CPUs
1410                          * might still deliver spurious interrupts that are
1411                          * still in flight. Catch them:
1412                          */
1413                         if (__test_and_clear_bit(idx, cpuc->running))
1414                                 handled++;
1415                         continue;
1416                 }
1417
1418                 event = cpuc->events[idx];
1419
1420                 val = x86_perf_event_update(event);
1421                 if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
1422                         continue;
1423
1424                 /*
1425                  * event overflow
1426                  */
1427                 handled++;
1428                 perf_sample_data_init(&data, 0, event->hw.last_period);
1429
1430                 if (!x86_perf_event_set_period(event))
1431                         continue;
1432
1433                 if (perf_event_overflow(event, &data, regs))
1434                         x86_pmu_stop(event, 0);
1435         }
1436
1437         if (handled)
1438                 inc_irq_stat(apic_perf_irqs);
1439
1440         return handled;
1441 }
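/*
 * Editor's note on the overflow test above (illustrative, assuming 48-bit
 * counters): counters are programmed with -left and count up towards 2^48,
 * so a counter that has not yet overflowed still reads as a large value with
 * bit 47 (cntval_bits - 1) set, while an overflowed counter has wrapped to a
 * small value with that bit clear; the handler therefore skips counters
 * whose top bit is still set.
 */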
1442
1443 void perf_events_lapic_init(void)
1444 {
1445         if (!x86_pmu.apic || !x86_pmu_initialized())
1446                 return;
1447
1448         /*
1449          * Always use NMI for PMU
1450          */
1451         apic_write(APIC_LVTPC, APIC_DM_NMI);
1452 }
1453
1454 static int
1455 perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
1456 {
1457         u64 start_clock;
1458         u64 finish_clock;
1459         int ret;
1460
1461         /*
1462          * All PMUs/events that share this PMI handler should make sure to
1463          * increment active_events for their events.
1464          */
1465         if (!atomic_read(&active_events))
1466                 return NMI_DONE;
1467
1468         start_clock = sched_clock();
1469         ret = x86_pmu.handle_irq(regs);
1470         finish_clock = sched_clock();
1471
1472         perf_sample_event_took(finish_clock - start_clock);
1473
1474         return ret;
1475 }
1476 NOKPROBE_SYMBOL(perf_event_nmi_handler);
1477
1478 struct event_constraint emptyconstraint;
1479 struct event_constraint unconstrained;
1480
1481 static int x86_pmu_prepare_cpu(unsigned int cpu)
1482 {
1483         struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1484         int i;
1485
1486         for (i = 0; i < X86_PERF_KFREE_MAX; i++)
1487                 cpuc->kfree_on_online[i] = NULL;
1488         if (x86_pmu.cpu_prepare)
1489                 return x86_pmu.cpu_prepare(cpu);
1490         return 0;
1491 }
1492
1493 static int x86_pmu_dead_cpu(unsigned int cpu)
1494 {
1495         if (x86_pmu.cpu_dead)
1496                 x86_pmu.cpu_dead(cpu);
1497         return 0;
1498 }
1499
1500 static int x86_pmu_online_cpu(unsigned int cpu)
1501 {
1502         struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1503         int i;
1504
1505         for (i = 0; i < X86_PERF_KFREE_MAX; i++) {
1506                 kfree(cpuc->kfree_on_online[i]);
1507                 cpuc->kfree_on_online[i] = NULL;
1508         }
1509         return 0;
1510 }
1511
1512 static int x86_pmu_starting_cpu(unsigned int cpu)
1513 {
1514         if (x86_pmu.cpu_starting)
1515                 x86_pmu.cpu_starting(cpu);
1516         return 0;
1517 }
1518
1519 static int x86_pmu_dying_cpu(unsigned int cpu)
1520 {
1521         if (x86_pmu.cpu_dying)
1522                 x86_pmu.cpu_dying(cpu);
1523         return 0;
1524 }
1525
1526 static void __init pmu_check_apic(void)
1527 {
1528         if (boot_cpu_has(X86_FEATURE_APIC))
1529                 return;
1530
1531         x86_pmu.apic = 0;
1532         pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
1533         pr_info("no hardware sampling interrupt available.\n");
1534
1535         /*
1536          * If we have a PMU initialized but no APIC
1537          * interrupts, we cannot sample hardware
1538          * events (user-space has to fall back and
1539          * sample via a hrtimer based software event):
1540          */
1541         pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
1542
1543 }
1544
1545 static struct attribute_group x86_pmu_format_group = {
1546         .name = "format",
1547         .attrs = NULL,
1548 };
1549
1550 /*
1551  * Remove all undefined events (x86_pmu.event_map(id) == 0)
1552  * from the events_attr attributes.
1553  */
1554 static void __init filter_events(struct attribute **attrs)
1555 {
1556         struct device_attribute *d;
1557         struct perf_pmu_events_attr *pmu_attr;
1558         int offset = 0;
1559         int i, j;
1560
1561         for (i = 0; attrs[i]; i++) {
1562                 d = (struct device_attribute *)attrs[i];
1563                 pmu_attr = container_of(d, struct perf_pmu_events_attr, attr);
1564                 /* str trumps id */
1565                 if (pmu_attr->event_str)
1566                         continue;
1567                 if (x86_pmu.event_map(i + offset))
1568                         continue;
1569
1570                 for (j = i; attrs[j]; j++)
1571                         attrs[j] = attrs[j + 1];
1572
1573                 /* Check the shifted attr. */
1574                 i--;
1575
1576                 /*
1577                  * event_map() is index based, and the attrs array is organized
1578                  * by increasing event index. If we shift the events, then
1579                  * we need to compensate in the event_map() lookup, otherwise
1580                  * we are looking up the wrong event in the map.
1581                  */
1582                 offset++;
1583         }
1584 }
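/*
 * Editor's example of the offset bookkeeping above (hypothetical ids): with
 * attrs[] holding events for ids 0,1,2,3 and event_map(2) == 0, the entry at
 * index 2 is removed and the old index-3 entry slides into slot 2. On the
 * next pass i is 2 again but the slot now describes id 3, so the lookup uses
 * event_map(i + offset) with offset == 1 to stay in sync with the id-based
 * event map.
 */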
1585
1586 /* Merge two pointer arrays */
1587 __init struct attribute **merge_attr(struct attribute **a, struct attribute **b)
1588 {
1589         struct attribute **new;
1590         int j, i;
1591
1592         for (j = 0; a[j]; j++)
1593                 ;
1594         for (i = 0; b[i]; i++)
1595                 j++;
1596         j++;
1597
1598         new = kmalloc(sizeof(struct attribute *) * j, GFP_KERNEL);
1599         if (!new)
1600                 return NULL;
1601
1602         j = 0;
1603         for (i = 0; a[i]; i++)
1604                 new[j++] = a[i];
1605         for (i = 0; b[i]; i++)
1606                 new[j++] = b[i];
1607         new[j] = NULL;
1608
1609         return new;
1610 }
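/*
 * Editor's usage sketch (extra_attrs and foo_attr are hypothetical):
 * merge_attr() concatenates two NULL-terminated attribute arrays into a
 * freshly allocated one; the inputs are left untouched.
 *
 *      static struct attribute *extra_attrs[] = { &foo_attr.attr, NULL };
 *
 *      x86_pmu_events_group.attrs =
 *              merge_attr(x86_pmu_events_group.attrs, extra_attrs);
 */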
1611
1612 ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, char *page)
1613 {
1614         struct perf_pmu_events_attr *pmu_attr = \
1615                 container_of(attr, struct perf_pmu_events_attr, attr);
1616         u64 config = x86_pmu.event_map(pmu_attr->id);
1617
1618         /* string trumps id */
1619         if (pmu_attr->event_str)
1620                 return sprintf(page, "%s", pmu_attr->event_str);
1621
1622         return x86_pmu.events_sysfs_show(page, config);
1623 }
1624 EXPORT_SYMBOL_GPL(events_sysfs_show);
1625
1626 ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
1627                           char *page)
1628 {
1629         struct perf_pmu_events_ht_attr *pmu_attr =
1630                 container_of(attr, struct perf_pmu_events_ht_attr, attr);
1631
1632         /*
1633          * Report conditional events depending on Hyper-Threading.
1634          *
1635          * This is overly conservative as usually the HT special
1636          * handling is not needed if the other CPU thread is idle.
1637          *
1638          * Note this does not (and cannot) handle the case when thread
1639          * siblings are invisible, for example with virtualization
1640          * if they are owned by some other guest.  The user tool
1641          * has to re-read when a thread sibling gets onlined later.
1642          */
1643         return sprintf(page, "%s",
1644                         topology_max_smt_threads() > 1 ?
1645                         pmu_attr->event_str_ht :
1646                         pmu_attr->event_str_noht);
1647 }
1648
1649 EVENT_ATTR(cpu-cycles,                  CPU_CYCLES              );
1650 EVENT_ATTR(instructions,                INSTRUCTIONS            );
1651 EVENT_ATTR(cache-references,            CACHE_REFERENCES        );
1652 EVENT_ATTR(cache-misses,                CACHE_MISSES            );
1653 EVENT_ATTR(branch-instructions,         BRANCH_INSTRUCTIONS     );
1654 EVENT_ATTR(branch-misses,               BRANCH_MISSES           );
1655 EVENT_ATTR(bus-cycles,                  BUS_CYCLES              );
1656 EVENT_ATTR(stalled-cycles-frontend,     STALLED_CYCLES_FRONTEND );
1657 EVENT_ATTR(stalled-cycles-backend,      STALLED_CYCLES_BACKEND  );
1658 EVENT_ATTR(ref-cycles,                  REF_CPU_CYCLES          );
1659
1660 static struct attribute *empty_attrs;
1661
1662 static struct attribute *events_attr[] = {
1663         EVENT_PTR(CPU_CYCLES),
1664         EVENT_PTR(INSTRUCTIONS),
1665         EVENT_PTR(CACHE_REFERENCES),
1666         EVENT_PTR(CACHE_MISSES),
1667         EVENT_PTR(BRANCH_INSTRUCTIONS),
1668         EVENT_PTR(BRANCH_MISSES),
1669         EVENT_PTR(BUS_CYCLES),
1670         EVENT_PTR(STALLED_CYCLES_FRONTEND),
1671         EVENT_PTR(STALLED_CYCLES_BACKEND),
1672         EVENT_PTR(REF_CPU_CYCLES),
1673         NULL,
1674 };
1675
1676 static struct attribute_group x86_pmu_events_group = {
1677         .name = "events",
1678         .attrs = events_attr,
1679 };
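
/*
 * Illustrative result on a typical Intel system (assumption, not taken
 * from this file): the group above shows up as /sys/devices/cpu/events/,
 * one file per generic event, e.g.
 *
 *        # cat /sys/devices/cpu/events/cache-misses
 *        event=0x2e,umask=0x41
 */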
1680
1681 ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event)
1682 {
1683         u64 umask  = (config & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
1684         u64 cmask  = (config & ARCH_PERFMON_EVENTSEL_CMASK) >> 24;
1685         bool edge  = (config & ARCH_PERFMON_EVENTSEL_EDGE);
1686         bool pc    = (config & ARCH_PERFMON_EVENTSEL_PIN_CONTROL);
1687         bool any   = (config & ARCH_PERFMON_EVENTSEL_ANY);
1688         bool inv   = (config & ARCH_PERFMON_EVENTSEL_INV);
1689         ssize_t ret;
1690
1691         /*
1692          * We have a whole page to spend and only a little data to
1693          * write, so we can safely use sprintf().
1694          */
1695         ret = sprintf(page, "event=0x%02llx", event);
1696
1697         if (umask)
1698                 ret += sprintf(page + ret, ",umask=0x%02llx", umask);
1699
1700         if (edge)
1701                 ret += sprintf(page + ret, ",edge");
1702
1703         if (pc)
1704                 ret += sprintf(page + ret, ",pc");
1705
1706         if (any)
1707                 ret += sprintf(page + ret, ",any");
1708
1709         if (inv)
1710                 ret += sprintf(page + ret, ",inv");
1711
1712         if (cmask)
1713                 ret += sprintf(page + ret, ",cmask=0x%02llx", cmask);
1714
1715         ret += sprintf(page + ret, "\n");
1716
1717         return ret;
1718 }
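
/*
 * For illustration (not part of the original source): with the usual
 * Intel encoding of cache-references, config == 0x4f2e and event == 0x2e,
 * the function above emits "event=0x2e,umask=0x4f\n", which the perf
 * tool accepts as the raw event cpu/event=0x2e,umask=0x4f/.
 */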
1719
1720 static int __init init_hw_perf_events(void)
1721 {
1722         struct x86_pmu_quirk *quirk;
1723         int err;
1724
1725         pr_info("Performance Events: ");
1726
1727         switch (boot_cpu_data.x86_vendor) {
1728         case X86_VENDOR_INTEL:
1729                 err = intel_pmu_init();
1730                 break;
1731         case X86_VENDOR_AMD:
1732                 err = amd_pmu_init();
1733                 break;
1734         default:
1735                 err = -ENOTSUPP;
1736         }
1737         if (err != 0) {
1738                 pr_cont("no PMU driver, software events only.\n");
1739                 return 0;
1740         }
1741
1742         pmu_check_apic();
1743
1744         /* sanity check that the hardware exists or is emulated */
1745         if (!check_hw_exists())
1746                 return 0;
1747
1748         pr_cont("%s PMU driver.\n", x86_pmu.name);
1749
1750         x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
1751
1752         for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
1753                 quirk->func();
1754
1755         if (!x86_pmu.intel_ctrl)
1756                 x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
1757
1758         perf_events_lapic_init();
1759         register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
1760
1761         unconstrained = (struct event_constraint)
1762                 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
1763                                    0, x86_pmu.num_counters, 0, 0);
1764
1765         x86_pmu_format_group.attrs = x86_pmu.format_attrs;
1766
1767         if (x86_pmu.event_attrs)
1768                 x86_pmu_events_group.attrs = x86_pmu.event_attrs;
1769
1770         if (!x86_pmu.events_sysfs_show)
1771                 x86_pmu_events_group.attrs = &empty_attrs;
1772         else
1773                 filter_events(x86_pmu_events_group.attrs);
1774
1775         if (x86_pmu.cpu_events) {
1776                 struct attribute **tmp;
1777
1778                 tmp = merge_attr(x86_pmu_events_group.attrs, x86_pmu.cpu_events);
1779                 if (!WARN_ON(!tmp))
1780                         x86_pmu_events_group.attrs = tmp;
1781         }
1782
1783         pr_info("... version:                %d\n",     x86_pmu.version);
1784         pr_info("... bit width:              %d\n",     x86_pmu.cntval_bits);
1785         pr_info("... generic registers:      %d\n",     x86_pmu.num_counters);
1786         pr_info("... value mask:             %016Lx\n", x86_pmu.cntval_mask);
1787         pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
1788         pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_counters_fixed);
1789         pr_info("... event mask:             %016Lx\n", x86_pmu.intel_ctrl);
1790
1791         /*
1792          * Install callbacks. Core will call them for each online
1793          * cpu.
1794          */
1795         err = cpuhp_setup_state(CPUHP_PERF_X86_PREPARE, "PERF_X86_PREPARE",
1796                                 x86_pmu_prepare_cpu, x86_pmu_dead_cpu);
1797         if (err)
1798                 return err;
1799
1800         err = cpuhp_setup_state(CPUHP_AP_PERF_X86_STARTING,
1801                                 "AP_PERF_X86_STARTING", x86_pmu_starting_cpu,
1802                                 x86_pmu_dying_cpu);
1803         if (err)
1804                 goto out;
1805
1806         err = cpuhp_setup_state(CPUHP_AP_PERF_X86_ONLINE, "AP_PERF_X86_ONLINE",
1807                                 x86_pmu_online_cpu, NULL);
1808         if (err)
1809                 goto out1;
1810
1811         err = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
1812         if (err)
1813                 goto out2;
1814
1815         return 0;
1816
1817 out2:
1818         cpuhp_remove_state(CPUHP_AP_PERF_X86_ONLINE);
1819 out1:
1820         cpuhp_remove_state(CPUHP_AP_PERF_X86_STARTING);
1821 out:
1822         cpuhp_remove_state(CPUHP_PERF_X86_PREPARE);
1823         return err;
1824 }
1825 early_initcall(init_hw_perf_events);
1826
1827 static inline void x86_pmu_read(struct perf_event *event)
1828 {
1829         x86_perf_event_update(event);
1830 }
1831
1832 /*
1833  * Start group events scheduling transaction
1834  * Set the flag to make pmu::enable() not perform the
1835  * schedulability test; it will be performed at commit time.
1836  *
1837  * We only support PERF_PMU_TXN_ADD transactions. Save the
1838  * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
1839  * transactions.
1840  */
1841 static void x86_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
1842 {
1843         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1844
1845         WARN_ON_ONCE(cpuc->txn_flags);          /* txn already in flight */
1846
1847         cpuc->txn_flags = txn_flags;
1848         if (txn_flags & ~PERF_PMU_TXN_ADD)
1849                 return;
1850
1851         perf_pmu_disable(pmu);
1852         __this_cpu_write(cpu_hw_events.n_txn, 0);
1853 }
1854
1855 /*
1856  * Stop group events scheduling transaction
1857  * Clear the flag and pmu::enable() will perform the
1858  * schedulability test.
1859  */
1860 static void x86_pmu_cancel_txn(struct pmu *pmu)
1861 {
1862         unsigned int txn_flags;
1863         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1864
1865         WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */
1866
1867         txn_flags = cpuc->txn_flags;
1868         cpuc->txn_flags = 0;
1869         if (txn_flags & ~PERF_PMU_TXN_ADD)
1870                 return;
1871
1872         /*
1873          * Truncate collected array by the number of events added in this
1874          * transaction. See x86_pmu_add() and x86_pmu_*_txn().
1875          */
1876         __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
1877         __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
1878         perf_pmu_enable(pmu);
1879 }
1880
1881 /*
1882  * Commit group events scheduling transaction
1883  * Perform the group schedulability test as a whole
1884  * Return 0 on success.
1885  *
1886  * Does not cancel the transaction on failure; expects the caller to do this.
1887  */
1888 static int x86_pmu_commit_txn(struct pmu *pmu)
1889 {
1890         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1891         int assign[X86_PMC_IDX_MAX];
1892         int n, ret;
1893
1894         WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */
1895
1896         if (cpuc->txn_flags & ~PERF_PMU_TXN_ADD) {
1897                 cpuc->txn_flags = 0;
1898                 return 0;
1899         }
1900
1901         n = cpuc->n_events;
1902
1903         if (!x86_pmu_initialized())
1904                 return -EAGAIN;
1905
1906         ret = x86_pmu.schedule_events(cpuc, n, assign);
1907         if (ret)
1908                 return ret;
1909
1910         /*
1911          * Copy the new assignment now that we know it is possible;
1912          * it will be used by hw_perf_enable().
1913          */
1914         memcpy(cpuc->assign, assign, n*sizeof(int));
1915
1916         cpuc->txn_flags = 0;
1917         perf_pmu_enable(pmu);
1918         return 0;
1919 }
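
/*
 * Sketch of the intended calling sequence (illustrative; roughly what
 * the perf core's group scheduling path does, not code from this file):
 *
 *        pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
 *        for each event in the group:
 *                pmu->add(event, flags);         // no schedulability test yet
 *        if (pmu->commit_txn(pmu))
 *                pmu->cancel_txn(pmu);           // caller cancels on failure
 *
 * which is why commit_txn() above does not cancel on failure itself.
 */
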
1920 /*
1921  * a fake_cpuc is used to validate event groups. Due to
1922  * the extra reg logic, we need to also allocate a fake
1923  * per_core and per_cpu structure. Otherwise, group events
1924  * using extra reg may conflict without the kernel being
1925  * able to catch this when the last event gets added to
1926  * the group.
1927  */
1928 static void free_fake_cpuc(struct cpu_hw_events *cpuc)
1929 {
1930         kfree(cpuc->shared_regs);
1931         kfree(cpuc);
1932 }
1933
1934 static struct cpu_hw_events *allocate_fake_cpuc(void)
1935 {
1936         struct cpu_hw_events *cpuc;
1937         int cpu = raw_smp_processor_id();
1938
1939         cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
1940         if (!cpuc)
1941                 return ERR_PTR(-ENOMEM);
1942
1943         /* only needed if we have extra_regs */
1944         if (x86_pmu.extra_regs) {
1945                 cpuc->shared_regs = allocate_shared_regs(cpu);
1946                 if (!cpuc->shared_regs)
1947                         goto error;
1948         }
1949         cpuc->is_fake = 1;
1950         return cpuc;
1951 error:
1952         free_fake_cpuc(cpuc);
1953         return ERR_PTR(-ENOMEM);
1954 }
1955
1956 /*
1957  * validate that we can schedule this event
1958  */
1959 static int validate_event(struct perf_event *event)
1960 {
1961         struct cpu_hw_events *fake_cpuc;
1962         struct event_constraint *c;
1963         int ret = 0;
1964
1965         fake_cpuc = allocate_fake_cpuc();
1966         if (IS_ERR(fake_cpuc))
1967                 return PTR_ERR(fake_cpuc);
1968
1969         c = x86_pmu.get_event_constraints(fake_cpuc, -1, event);
1970
1971         if (!c || !c->weight)
1972                 ret = -EINVAL;
1973
1974         if (x86_pmu.put_event_constraints)
1975                 x86_pmu.put_event_constraints(fake_cpuc, event);
1976
1977         free_fake_cpuc(fake_cpuc);
1978
1979         return ret;
1980 }
1981
1982 /*
1983  * validate a single event group
1984  *
1985  * validation includes:
1986  *      - check that events are compatible with each other
1987  *      - events do not compete for the same counter
1988  *      - number of events <= number of counters
1989  *
1990  * validation ensures the group can be loaded onto the
1991  * PMU if it was the only group available.
1992  */
1993 static int validate_group(struct perf_event *event)
1994 {
1995         struct perf_event *leader = event->group_leader;
1996         struct cpu_hw_events *fake_cpuc;
1997         int ret = -EINVAL, n;
1998
1999         fake_cpuc = allocate_fake_cpuc();
2000         if (IS_ERR(fake_cpuc))
2001                 return PTR_ERR(fake_cpuc);
2002         /*
2003          * The event is not yet connected to its siblings,
2004          * therefore we must first collect the existing
2005          * siblings, then add the new event before we can
2006          * simulate the scheduling.
2007          */
2008         n = collect_events(fake_cpuc, leader, true);
2009         if (n < 0)
2010                 goto out;
2011
2012         fake_cpuc->n_events = n;
2013         n = collect_events(fake_cpuc, event, false);
2014         if (n < 0)
2015                 goto out;
2016
2017         fake_cpuc->n_events = n;
2018
2019         ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
2020
2021 out:
2022         free_fake_cpuc(fake_cpuc);
2023         return ret;
2024 }
2025
2026 static int x86_pmu_event_init(struct perf_event *event)
2027 {
2028         struct pmu *tmp;
2029         int err;
2030
2031         switch (event->attr.type) {
2032         case PERF_TYPE_RAW:
2033         case PERF_TYPE_HARDWARE:
2034         case PERF_TYPE_HW_CACHE:
2035                 break;
2036
2037         default:
2038                 return -ENOENT;
2039         }
2040
2041         err = __x86_pmu_event_init(event);
2042         if (!err) {
2043                 /*
2044                  * We temporarily connect the event to this PMU
2045                  * such that validate_group() can classify it
2046                  * as an x86 event using is_x86_event().
2047                  */
2048                 tmp = event->pmu;
2049                 event->pmu = &pmu;
2050
2051                 if (event->group_leader != event)
2052                         err = validate_group(event);
2053                 else
2054                         err = validate_event(event);
2055
2056                 event->pmu = tmp;
2057         }
2058         if (err) {
2059                 if (event->destroy)
2060                         event->destroy(event);
2061         }
2062
2063         if (ACCESS_ONCE(x86_pmu.attr_rdpmc))
2064                 event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED;
2065
2066         return err;
2067 }
2068
2069 static void refresh_pce(void *ignored)
2070 {
2071         if (current->mm)
2072                 load_mm_cr4(current->mm);
2073 }
2074
2075 static void x86_pmu_event_mapped(struct perf_event *event)
2076 {
2077         if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
2078                 return;
2079
2080         if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
2081                 on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
2082 }
2083
2084 static void x86_pmu_event_unmapped(struct perf_event *event)
2085 {
2086         if (!current->mm)
2087                 return;
2088
2089         if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
2090                 return;
2091
2092         if (atomic_dec_and_test(&current->mm->context.perf_rdpmc_allowed))
2093                 on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
2094 }
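
/*
 * Illustrative note: together, the mapped/unmapped hooks keep a per-mm
 * count of mmap()ed self-monitoring events and toggle CR4.PCE on the
 * CPUs running that mm (via refresh_pce()), so user-space RDPMC only
 * works while at least one of the task's events is mapped (unless the
 * rdpmc attribute is set to "always available").
 */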
2095
2096 static int x86_pmu_event_idx(struct perf_event *event)
2097 {
2098         int idx = event->hw.idx;
2099
2100         if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
2101                 return 0;
2102
2103         if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) {
2104                 idx -= INTEL_PMC_IDX_FIXED;
2105                 idx |= 1 << 30;
2106         }
2107
2108         return idx + 1;
2109 }
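
/*
 * For illustration (assumption about the user-space side, not code from
 * this file): the value returned here is published in the mmap()ed
 * perf_event_mmap_page as "index", and a self-monitoring task reads the
 * counter roughly as:
 *
 *        if (index)
 *                count = rdpmc(index - 1);       // hypothetical rdpmc() helper
 *
 * with bit 30 of the RDPMC ECX operand selecting the fixed counters,
 * matching the "idx |= 1 << 30" above.
 */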
2110
2111 static ssize_t get_attr_rdpmc(struct device *cdev,
2112                               struct device_attribute *attr,
2113                               char *buf)
2114 {
2115         return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc);
2116 }
2117
2118 static ssize_t set_attr_rdpmc(struct device *cdev,
2119                               struct device_attribute *attr,
2120                               const char *buf, size_t count)
2121 {
2122         unsigned long val;
2123         ssize_t ret;
2124
2125         ret = kstrtoul(buf, 0, &val);
2126         if (ret)
2127                 return ret;
2128
2129         if (val > 2)
2130                 return -EINVAL;
2131
2132         if (x86_pmu.attr_rdpmc_broken)
2133                 return -ENOTSUPP;
2134
2135         if ((val == 2) != (x86_pmu.attr_rdpmc == 2)) {
2136                 /*
2137                  * Changing into or out of always available, aka
2138                  * perf-event-bypassing mode.  This path is extremely slow,
2139                  * but only root can trigger it, so it's okay.
2140                  */
2141                 if (val == 2)
2142                         static_key_slow_inc(&rdpmc_always_available);
2143                 else
2144                         static_key_slow_dec(&rdpmc_always_available);
2145                 on_each_cpu(refresh_pce, NULL, 1);
2146         }
2147
2148         x86_pmu.attr_rdpmc = val;
2149
2150         return count;
2151 }
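
/*
 * Sketch of the user-visible knob (illustrative): this attribute is
 * typically exposed as /sys/devices/cpu/rdpmc, where
 *
 *        0 - RDPMC is not allowed for newly created events
 *        1 - RDPMC works while the task has an event mmap()ed (default)
 *        2 - RDPMC is always allowed, e.g. enabled with
 *
 *        # echo 2 > /sys/devices/cpu/rdpmc
 */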
2152
2153 static DEVICE_ATTR(rdpmc, S_IRUSR | S_IWUSR, get_attr_rdpmc, set_attr_rdpmc);
2154
2155 static struct attribute *x86_pmu_attrs[] = {
2156         &dev_attr_rdpmc.attr,
2157         NULL,
2158 };
2159
2160 static struct attribute_group x86_pmu_attr_group = {
2161         .attrs = x86_pmu_attrs,
2162 };
2163
2164 static const struct attribute_group *x86_pmu_attr_groups[] = {
2165         &x86_pmu_attr_group,
2166         &x86_pmu_format_group,
2167         &x86_pmu_events_group,
2168         NULL,
2169 };
2170
2171 static void x86_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
2172 {
2173         if (x86_pmu.sched_task)
2174                 x86_pmu.sched_task(ctx, sched_in);
2175 }
2176
2177 void perf_check_microcode(void)
2178 {
2179         if (x86_pmu.check_microcode)
2180                 x86_pmu.check_microcode();
2181 }
2182 EXPORT_SYMBOL_GPL(perf_check_microcode);
2183
2184 static struct pmu pmu = {
2185         .pmu_enable             = x86_pmu_enable,
2186         .pmu_disable            = x86_pmu_disable,
2187
2188         .attr_groups            = x86_pmu_attr_groups,
2189
2190         .event_init             = x86_pmu_event_init,
2191
2192         .event_mapped           = x86_pmu_event_mapped,
2193         .event_unmapped         = x86_pmu_event_unmapped,
2194
2195         .add                    = x86_pmu_add,
2196         .del                    = x86_pmu_del,
2197         .start                  = x86_pmu_start,
2198         .stop                   = x86_pmu_stop,
2199         .read                   = x86_pmu_read,
2200
2201         .start_txn              = x86_pmu_start_txn,
2202         .cancel_txn             = x86_pmu_cancel_txn,
2203         .commit_txn             = x86_pmu_commit_txn,
2204
2205         .event_idx              = x86_pmu_event_idx,
2206         .sched_task             = x86_pmu_sched_task,
2207         .task_ctx_size          = sizeof(struct x86_perf_task_context),
2208 };
2209
2210 void arch_perf_update_userpage(struct perf_event *event,
2211                                struct perf_event_mmap_page *userpg, u64 now)
2212 {
2213         struct cyc2ns_data *data;
2214
2215         userpg->cap_user_time = 0;
2216         userpg->cap_user_time_zero = 0;
2217         userpg->cap_user_rdpmc =
2218                 !!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED);
2219         userpg->pmc_width = x86_pmu.cntval_bits;
2220
2221         if (!sched_clock_stable())
2222                 return;
2223
2224         data = cyc2ns_read_begin();
2225
2226         /*
2227          * Internal timekeeping for enabled/running/stopped times
2228          * is always in the local_clock domain.
2229          */
2230         userpg->cap_user_time = 1;
2231         userpg->time_mult = data->cyc2ns_mul;
2232         userpg->time_shift = data->cyc2ns_shift;
2233         userpg->time_offset = data->cyc2ns_offset - now;
2234
2235         /*
2236          * cap_user_time_zero doesn't make sense when we're using a different
2237          * time base for the records.
2238          */
2239         if (!event->attr.use_clockid) {
2240                 userpg->cap_user_time_zero = 1;
2241                 userpg->time_zero = data->cyc2ns_offset;
2242         }
2243
2244         cyc2ns_read_end(data);
2245 }
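
/*
 * Illustrative user-space use of these fields (a sketch following the
 * description in include/uapi/linux/perf_event.h, not code from this
 * file): with cap_user_time set, a TSC value "cyc" is converted to
 * local_clock() nanoseconds roughly as:
 *
 *        quot  = cyc >> time_shift;
 *        rem   = cyc & (((u64)1 << time_shift) - 1);
 *        delta = time_offset + quot * time_mult +
 *                ((rem * time_mult) >> time_shift);
 */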
2246
2247 /*
2248  * callchain support
2249  */
2250
2251 static int backtrace_stack(void *data, char *name)
2252 {
2253         return 0;
2254 }
2255
2256 static int backtrace_address(void *data, unsigned long addr, int reliable)
2257 {
2258         struct perf_callchain_entry_ctx *entry = data;
2259
2260         return perf_callchain_store(entry, addr);
2261 }
2262
2263 static const struct stacktrace_ops backtrace_ops = {
2264         .stack                  = backtrace_stack,
2265         .address                = backtrace_address,
2266         .walk_stack             = print_context_stack_bp,
2267 };
2268
2269 void
2270 perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
2271 {
2272         if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
2273                 /* TODO: We don't support guest OS callchains yet */
2274                 return;
2275         }
2276
2277         perf_callchain_store(entry, regs->ip);
2278
2279         dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
2280 }
2281
2282 static inline int
2283 valid_user_frame(const void __user *fp, unsigned long size)
2284 {
2285         return (__range_not_ok(fp, size, TASK_SIZE) == 0);
2286 }
2287
2288 static unsigned long get_segment_base(unsigned int segment)
2289 {
2290         struct desc_struct *desc;
2291         int idx = segment >> 3;
2292
2293         if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
2294 #ifdef CONFIG_MODIFY_LDT_SYSCALL
2295                 struct ldt_struct *ldt;
2296
2297                 if (idx >= LDT_ENTRIES)
2298                         return 0;
2299
2300                 /* IRQs are off, so this synchronizes with smp_store_release */
2301                 ldt = lockless_dereference(current->active_mm->context.ldt);
2302                 if (!ldt || idx >= ldt->size)
2303                         return 0;
2304
2305                 desc = &ldt->entries[idx];
2306 #else
2307                 return 0;
2308 #endif
2309         } else {
2310                 if (idx >= GDT_ENTRIES)
2311                         return 0;
2312
2313                 desc = raw_cpu_ptr(gdt_page.gdt) + idx;
2314         }
2315
2316         return get_desc_base(desc);
2317 }
2318
2319 #ifdef CONFIG_IA32_EMULATION
2320
2321 #include <asm/compat.h>
2322
2323 static inline int
2324 perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
2325 {
2326         /* 32-bit process in 64-bit kernel. */
2327         unsigned long ss_base, cs_base;
2328         struct stack_frame_ia32 frame;
2329         const void __user *fp;
2330
2331         if (!test_thread_flag(TIF_IA32))
2332                 return 0;
2333
2334         cs_base = get_segment_base(regs->cs);
2335         ss_base = get_segment_base(regs->ss);
2336
2337         fp = compat_ptr(ss_base + regs->bp);
2338         pagefault_disable();
2339         while (entry->nr < entry->max_stack) {
2340                 unsigned long bytes;
2341                 frame.next_frame     = 0;
2342                 frame.return_address = 0;
2343
2344                 if (!access_ok(VERIFY_READ, fp, 8))
2345                         break;
2346
2347                 bytes = __copy_from_user_nmi(&frame.next_frame, fp, 4);
2348                 if (bytes != 0)
2349                         break;
2350                 bytes = __copy_from_user_nmi(&frame.return_address, fp+4, 4);
2351                 if (bytes != 0)
2352                         break;
2353
2354                 if (!valid_user_frame(fp, sizeof(frame)))
2355                         break;
2356
2357                 perf_callchain_store(entry, cs_base + frame.return_address);
2358                 fp = compat_ptr(ss_base + frame.next_frame);
2359         }
2360         pagefault_enable();
2361         return 1;
2362 }
2363 #else
2364 static inline int
2365 perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
2366 {
2367         return 0;
2368 }
2369 #endif
2370
2371 void
2372 perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
2373 {
2374         struct stack_frame frame;
2375         const unsigned long __user *fp;
2376
2377         if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
2378                 /* TODO: We don't support guest OS callchains yet */
2379                 return;
2380         }
2381
2382         /*
2383          * We don't know what to do with VM86 stacks; ignore them for now.
2384          */
2385         if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
2386                 return;
2387
2388         fp = (unsigned long __user *)regs->bp;
2389
2390         perf_callchain_store(entry, regs->ip);
2391
2392         if (!current->mm)
2393                 return;
2394
2395         if (perf_callchain_user32(regs, entry))
2396                 return;
2397
2398         pagefault_disable();
2399         while (entry->nr < entry->max_stack) {
2400                 unsigned long bytes;
2401
2402                 frame.next_frame     = NULL;
2403                 frame.return_address = 0;
2404
2405                 if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2))
2406                         break;
2407
2408                 bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
2409                 if (bytes != 0)
2410                         break;
2411                 bytes = __copy_from_user_nmi(&frame.return_address, fp + 1, sizeof(*fp));
2412                 if (bytes != 0)
2413                         break;
2414
2415                 if (!valid_user_frame(fp, sizeof(frame)))
2416                         break;
2417
2418                 perf_callchain_store(entry, frame.return_address);
2419                 fp = (void __user *)frame.next_frame;
2420         }
2421         pagefault_enable();
2422 }
2423
2424 /*
2425  * Deal with code segment offsets for the various execution modes:
2426  *
2427  *   VM86 - the good olde 16 bit days, where the linear address is
2428  *          20 bits and we use regs->ip + 0x10 * regs->cs.
2429  *
2430  *   IA32 - Where we need to look at GDT/LDT segment descriptor tables
2431  *          to figure out what the 32bit base address is.
2432  *
2433  *    X32 - has TIF_X32 set, but is running in x86_64
2434  *
2435  * X86_64 - CS,DS,SS,ES are all zero based.
2436  */
2437 static unsigned long code_segment_base(struct pt_regs *regs)
2438 {
2439         /*
2440          * For IA32 we look at the GDT/LDT segment base to convert the
2441          * effective IP to a linear address.
2442          */
2443
2444 #ifdef CONFIG_X86_32
2445         /*
2446          * If we are in VM86 mode, add the segment offset to convert to a
2447          * linear address.
2448          */
2449         if (regs->flags & X86_VM_MASK)
2450                 return 0x10 * regs->cs;
2451
2452         if (user_mode(regs) && regs->cs != __USER_CS)
2453                 return get_segment_base(regs->cs);
2454 #else
2455         if (user_mode(regs) && !user_64bit_mode(regs) &&
2456             regs->cs != __USER32_CS)
2457                 return get_segment_base(regs->cs);
2458 #endif
2459         return 0;
2460 }
2461
2462 unsigned long perf_instruction_pointer(struct pt_regs *regs)
2463 {
2464         if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
2465                 return perf_guest_cbs->get_guest_ip();
2466
2467         return regs->ip + code_segment_base(regs);
2468 }
2469
2470 unsigned long perf_misc_flags(struct pt_regs *regs)
2471 {
2472         int misc = 0;
2473
2474         if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
2475                 if (perf_guest_cbs->is_user_mode())
2476                         misc |= PERF_RECORD_MISC_GUEST_USER;
2477                 else
2478                         misc |= PERF_RECORD_MISC_GUEST_KERNEL;
2479         } else {
2480                 if (user_mode(regs))
2481                         misc |= PERF_RECORD_MISC_USER;
2482                 else
2483                         misc |= PERF_RECORD_MISC_KERNEL;
2484         }
2485
2486         if (regs->flags & PERF_EFLAGS_EXACT)
2487                 misc |= PERF_RECORD_MISC_EXACT_IP;
2488
2489         return misc;
2490 }
2491
2492 void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
2493 {
2494         cap->version            = x86_pmu.version;
2495         cap->num_counters_gp    = x86_pmu.num_counters;
2496         cap->num_counters_fixed = x86_pmu.num_counters_fixed;
2497         cap->bit_width_gp       = x86_pmu.cntval_bits;
2498         cap->bit_width_fixed    = x86_pmu.cntval_bits;
2499         cap->events_mask        = (unsigned int)x86_pmu.events_maskl;
2500         cap->events_mask_len    = x86_pmu.events_mask_len;
2501 }
2502 EXPORT_SYMBOL_GPL(perf_get_x86_pmu_capability);