// SPDX-License-Identifier: GPL-2.0-only
/*
 * Linux performance counter support for MIPS.
 *
 * Copyright (C) 2010 MIPS Technologies, Inc.
 * Copyright (C) 2011 Cavium Networks, Inc.
 * Author: Deng-Cheng Zhu
 *
 * This code is based on the implementation for ARM, which is in turn
 * based on the sparc64 perf event code and the x86 code. Performance
 * counter access is based on the MIPS Oprofile code. And the callchain
 * support references the code of MIPS stacktrace.c.
 */

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/stacktrace.h>
#include <asm/time.h> /* For perf_irq */
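
/*
 * A CPU has at most MIPS_MAX_HWEVENTS hardware counters. On cores where
 * the counters are shared between thread contexts (TCs), two TCs map
 * onto each counter and the low bit of the CPU id tells them apart (see
 * vpe_id() below).
 */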
#define MIPS_MAX_HWEVENTS 4
#define MIPS_TCS_PER_COUNTER 2
#define MIPS_CPUID_TO_COUNTER_MASK (MIPS_TCS_PER_COUNTER - 1)

struct cpu_hw_events {
	/* Array of events on this cpu. */
	struct perf_event	*events[MIPS_MAX_HWEVENTS];

	/*
	 * Set the bit (indexed by the counter number) when the counter
	 * is used for an event.
	 */
	unsigned long		used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];

	/*
	 * Software copy of the control register for each performance counter.
	 * MIPS CPUs vary in their performance counters; each flavour uses
	 * this copy differently, and some may not use it at all.
	 */
	unsigned int		saved_ctrl[MIPS_MAX_HWEVENTS];
};

DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.saved_ctrl = {0},
};

/* The description of MIPS performance events. */
struct mips_perf_event {
	unsigned int event_id;
	/*
	 * MIPS performance counters are indexed starting from 0.
	 * CNTR_EVEN indicates the indexes of the counters to be used are
	 * even numbers, otherwise odd numbers.
	 */
	unsigned int cntr_mask;
	#define CNTR_EVEN	0x55555555
	#define CNTR_ODD	0xaaaaaaaa
	#define CNTR_ALL	0xffffffff
	enum {
		T  = 0,	/* count per thread/TC */
		V  = 1,	/* count per VPE */
		P  = 2,	/* count processor-wide */
	} range;
};

static struct mips_perf_event raw_event;
static DEFINE_MUTEX(raw_event_mutex);

#define C(x) PERF_COUNT_HW_CACHE_##x
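
/*
 * Per-CPU-type PMU description: counter width bookkeeping (max_period,
 * valid_count, overflow), the irq in use, the low-level counter
 * accessors and the event maps for this core.
 */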
struct mips_pmu {
	u64		max_period;
	u64		valid_count;
	u64		overflow;
	const char	*name;
	int		irq;
	u64		(*read_counter)(unsigned int idx);
	void		(*write_counter)(unsigned int idx, u64 val);
	const struct mips_perf_event *(*map_raw_event)(u64 config);
	const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
	const struct mips_perf_event (*cache_event_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
	unsigned int	num_counters;
};

static struct mips_pmu mipspmu;

#define M_PERFCTL_EVENT(event)		(((event) << MIPS_PERFCTRL_EVENT_S) & \
					 MIPS_PERFCTRL_EVENT)
#define M_PERFCTL_VPEID(vpe)		((vpe) << MIPS_PERFCTRL_VPEID_S)

#ifdef CONFIG_CPU_BMIPS5000
#define M_PERFCTL_MT_EN(filter)		0
#else /* !CONFIG_CPU_BMIPS5000 */
#define M_PERFCTL_MT_EN(filter)		(filter)
#endif /* CONFIG_CPU_BMIPS5000 */

#define	   M_TC_EN_ALL			M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_ALL)
#define	   M_TC_EN_VPE			M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_VPE)
#define	   M_TC_EN_TC			M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_TC)

#define M_PERFCTL_COUNT_EVENT_WHENEVER	(MIPS_PERFCTRL_EXL |		\
					 MIPS_PERFCTRL_K |		\
					 MIPS_PERFCTRL_U |		\
					 MIPS_PERFCTRL_S |		\
					 MIPS_PERFCTRL_IE)
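
/*
 * The bits of the control register that event configuration is allowed
 * to touch: the mode-exclusion and interrupt-enable bits (0x1f) plus,
 * on MT SMP kernels, the TC/VPE filtering fields.
 */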
#ifdef CONFIG_MIPS_MT_SMP
#define M_PERFCTL_CONFIG_MASK		0x3fff801f
#else
#define M_PERFCTL_CONFIG_MASK		0x1f
#endif

#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
static DEFINE_RWLOCK(pmuint_rwlock);

#if defined(CONFIG_CPU_BMIPS5000)
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			 0 : (smp_processor_id() & MIPS_CPUID_TO_COUNTER_MASK))
#else
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			 0 : cpu_vpe_id(&current_cpu_data))
#endif

/* Copied from op_model_mipsxx.c */
static unsigned int vpe_shift(void)
{
	if (num_possible_cpus() > 1)
		return 1;

	return 0;
}

static unsigned int counters_total_to_per_cpu(unsigned int counters)
{
	return counters >> vpe_shift();
}

#else /* !CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
#define vpe_id()	0

#endif /* CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */

static void resume_local_counters(void);
static void pause_local_counters(void);
static irqreturn_t mipsxx_pmu_handle_irq(int, void *);
static int mipsxx_pmu_handle_shared_irq(void);
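
/*
 * On cores where two VPEs share the physical counter bank, VPE 1 uses
 * the second half of the bank: swizzle a perf counter index so that
 * each VPE addresses its own counters.
 */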
static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx)
{
	if (vpe_id() == 1)
		idx = (idx + 2) & 3;
	return idx;
}

static u64 mipsxx_pmu_read_counter(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		/*
		 * The counters are unsigned, we must cast to truncate
		 * off any sign-extended upper bits.
		 */
		return (u32)read_c0_perfcntr0();
	case 1:
		return (u32)read_c0_perfcntr1();
	case 2:
		return (u32)read_c0_perfcntr2();
	case 3:
		return (u32)read_c0_perfcntr3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static u64 mipsxx_pmu_read_counter_64(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		return read_c0_perfcntr0_64();
	case 1:
		return read_c0_perfcntr1_64();
	case 2:
		return read_c0_perfcntr2_64();
	case 3:
		return read_c0_perfcntr3_64();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfcntr0(val);
		return;
	case 1:
		write_c0_perfcntr1(val);
		return;
	case 2:
		write_c0_perfcntr2(val);
		return;
	case 3:
		write_c0_perfcntr3(val);
		return;
	}
}

static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfcntr0_64(val);
		return;
	case 1:
		write_c0_perfcntr1_64(val);
		return;
	case 2:
		write_c0_perfcntr2_64(val);
		return;
	case 3:
		write_c0_perfcntr3_64(val);
		return;
	}
}

static unsigned int mipsxx_pmu_read_control(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		return read_c0_perfctrl0();
	case 1:
		return read_c0_perfctrl1();
	case 2:
		return read_c0_perfctrl2();
	case 3:
		return read_c0_perfctrl3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfctrl0(val);
		return;
	case 1:
		write_c0_perfctrl1(val);
		return;
	case 2:
		write_c0_perfctrl2(val);
		return;
	case 3:
		write_c0_perfctrl3(val);
		return;
	}
}

static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
				    struct hw_perf_event *hwc)
{
	int i;

	/*
	 * We only need to care about the counter mask here; the range
	 * has already been checked.
	 */
	unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;

	for (i = mipspmu.num_counters - 1; i >= 0; i--) {
		/*
		 * Note that some MIPS perf events can be counted by both
		 * even and odd counters, whereas many others are countable
		 * only by even _or_ odd counters. This introduces an issue
		 * that when the former kind of event takes the counter the
		 * latter kind of event wants to use, then the "counter
		 * allocation" for the latter event will fail. If the two
		 * could be dynamically swapped, both would be satisfied,
		 * but here we leave this issue alone for now.
		 */
		if (test_bit(i, &cntr_mask) &&
			!test_and_set_bit(i, cpuc->used_mask))
			return i;
	}

	return -EAGAIN;
}

static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
{
	struct perf_event *event = container_of(evt, struct perf_event, hw);
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	unsigned int range = evt->event_base >> 24;

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
		(evt->config_base & M_PERFCTL_CONFIG_MASK) |
		/* Make sure the interrupt is enabled. */
		MIPS_PERFCTRL_IE;

	if (IS_ENABLED(CONFIG_CPU_BMIPS5000)) {
		/* enable the counter for the calling thread */
		cpuc->saved_ctrl[idx] |=
			(1 << (12 + vpe_id())) | BRCM_PERFCTRL_TC;
	} else if (IS_ENABLED(CONFIG_MIPS_MT_SMP) && range > V) {
		/* The counter is processor wide. Set it up to count all TCs. */
		pr_debug("Enabling perf counter for all TCs\n");
		cpuc->saved_ctrl[idx] |= M_TC_EN_ALL;
	} else {
		unsigned int cpu, ctrl;

		/*
		 * Set up the counter for a particular CPU when event->cpu is
		 * a valid CPU number. Otherwise set up the counter for the CPU
		 * scheduling this thread.
		 */
		cpu = (event->cpu >= 0) ? event->cpu : smp_processor_id();

		ctrl = M_PERFCTL_VPEID(cpu_vpe_id(&cpu_data[cpu]));
		ctrl |= M_TC_EN_VPE;
		cpuc->saved_ctrl[idx] |= ctrl;
		pr_debug("Enabling perf counter for CPU%d\n", cpu);
	}
	/*
	 * We do not actually let the counter run. Leave it until start().
	 */
}

static void mipsxx_pmu_disable_event(int idx)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	unsigned long flags;

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	local_irq_save(flags);
	cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
		~M_PERFCTL_COUNT_EVENT_WHENEVER;
	mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
	local_irq_restore(flags);
}
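
/*
 * Program the counter so that it overflows after "left" more events:
 * the hardware sets the overflow bit once the count reaches
 * mipspmu.overflow, so the counter is started at overflow - left.
 */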
static int mipspmu_event_set_period(struct perf_event *event,
				    struct hw_perf_event *hwc,
				    int idx)
{
	u64 left = local64_read(&hwc->period_left);
	u64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely((left + period) & (1ULL << 63))) {
		/* left underflowed by more than period. */
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	} else if (unlikely((left + period) <= period)) {
		/* left underflowed by less than period. */
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > mipspmu.max_period) {
		left = mipspmu.max_period;
		local64_set(&hwc->period_left, left);
	}

	local64_set(&hwc->prev_count, mipspmu.overflow - left);

	mipspmu.write_counter(idx, mipspmu.overflow - left);

	perf_event_update_userpage(event);

	return ret;
}
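
/*
 * Fold the delta since the last read into the event count. The
 * cmpxchg loop retries if prev_count was updated (e.g. by an
 * interrupt) between the read and the exchange.
 */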
static void mipspmu_event_update(struct perf_event *event,
				 struct hw_perf_event *hwc,
				 int idx)
{
	u64 prev_raw_count, new_raw_count;
	u64 delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = mipspmu.read_counter(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
				new_raw_count) != prev_raw_count)
		goto again;

	delta = new_raw_count - prev_raw_count;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
}

static void mipspmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	/* Set the period for the event. */
	mipspmu_event_set_period(event, hwc, hwc->idx);

	/* Enable the event. */
	mipsxx_pmu_enable_event(hwc, hwc->idx);
}

static void mipspmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		/* We are working on a local event. */
		mipsxx_pmu_disable_event(hwc->idx);
		barrier();
		mipspmu_event_update(event, hwc, hwc->idx);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static int mipspmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* Look for a free counter for this event. */
	idx = mipsxx_pmu_alloc_counter(cpuc, hwc);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then
	 * make sure it is disabled.
	 */
	event->hw.idx = idx;
	mipsxx_pmu_disable_event(idx);
	cpuc->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		mipspmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

static void mipspmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	mipspmu_stop(event, PERF_EF_UPDATE);
	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

static void mipspmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	mipspmu_event_update(event, hwc, hwc->idx);
}

static void mipspmu_enable(struct pmu *pmu)
{
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	write_unlock(&pmuint_rwlock);
#endif
	resume_local_counters();
}

/*
 * MIPS performance counters can be per-TC. The control registers can
 * not be directly accessed across CPUs. Hence if we want to do global
 * control, we need cross CPU calls. on_each_cpu() can help us, but we
 * can not make sure this function is called with interrupts enabled. So
 * here we pause local counters and then grab a rwlock and leave the
 * counters on other CPUs alone. If any counter interrupt is raised while
 * we own the write lock, simply pause local counters on that CPU and
 * spin in the handler. Also we know we won't be switched to another
 * CPU after pausing local counters and before grabbing the lock.
 */
static void mipspmu_disable(struct pmu *pmu)
{
	pause_local_counters();
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	write_lock(&pmuint_rwlock);
#endif
}

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);
static int (*save_perf_irq)(void);

static int mipspmu_get_irq(void)
{
	int err;

	if (mipspmu.irq >= 0) {
		/* Request my own irq handler. */
		err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
				  IRQF_PERCPU | IRQF_NOBALANCING |
				  IRQF_NO_THREAD | IRQF_NO_SUSPEND |
				  IRQF_SHARED,
				  "mips_perf_pmu", &mipspmu);
		if (err) {
			pr_warn("Unable to request IRQ%d for MIPS performance counters!\n",
				mipspmu.irq);
		}
	} else if (cp0_perfcount_irq < 0) {
		/*
		 * We are sharing the irq number with the timer interrupt.
		 */
		save_perf_irq = perf_irq;
		perf_irq = mipsxx_pmu_handle_shared_irq;
		err = 0;
	} else {
		pr_warn("The platform hasn't properly defined its interrupt controller\n");
		err = -ENOENT;
	}

	return err;
}

static void mipspmu_free_irq(void)
{
	if (mipspmu.irq >= 0)
		free_irq(mipspmu.irq, &mipspmu);
	else if (cp0_perfcount_irq < 0)
		perf_irq = save_perf_irq;
}

/*
 * mipsxx/rm9000/loongson2 have different performance counters, so each
 * has its own specific low-level init routine.
 */
static void reset_counters(void *arg);
static int __hw_perf_event_init(struct perf_event *event);

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events,
				&pmu_reserve_mutex)) {
		/*
		 * We must not call the destroy function with interrupts
		 * disabled.
		 */
		on_each_cpu(reset_counters,
			(void *)(long)mipspmu.num_counters, 1);
		mipspmu_free_irq();
		mutex_unlock(&pmu_reserve_mutex);
	}
}

static int mipspmu_event_init(struct perf_event *event)
{
	int err = 0;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	if (event->cpu >= 0 && !cpu_online(event->cpu))
		return -ENODEV;

	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmu_reserve_mutex);
		if (atomic_read(&active_events) == 0)
			err = mipspmu_get_irq();

		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmu_reserve_mutex);
	}

	if (err)
		return err;

	return __hw_perf_event_init(event);
}

static struct pmu pmu = {
	.pmu_enable	= mipspmu_enable,
	.pmu_disable	= mipspmu_disable,
	.event_init	= mipspmu_event_init,
	.add		= mipspmu_add,
	.del		= mipspmu_del,
	.start		= mipspmu_start,
	.stop		= mipspmu_stop,
	.read		= mipspmu_read,
};
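
/*
 * Layout of the encoded event, as packed by
 * mipspmu_perf_event_encode() below:
 *
 *  31         24 23              8 7          0
 * +-------------+-----------------+-----------+
 * |    range    |    cntr_mask    | event_id  |
 * +-------------+-----------------+-----------+
 */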
static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
{
/*
 * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
 * event_id.
 */
#ifdef CONFIG_MIPS_MT_SMP
	if (num_possible_cpus() > 1)
		return ((unsigned int)pev->range << 24) |
			(pev->cntr_mask & 0xffff00) |
			(pev->event_id & 0xff);
	else
#endif /* CONFIG_MIPS_MT_SMP */
		return ((pev->cntr_mask & 0xffff00) |
			(pev->event_id & 0xff));
}

static const struct mips_perf_event *mipspmu_map_general_event(int idx)
{
	if ((*mipspmu.general_event_map)[idx].cntr_mask == 0)
		return ERR_PTR(-EOPNOTSUPP);
	return &(*mipspmu.general_event_map)[idx];
}

static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct mips_perf_event *pev;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pev = &((*mipspmu.cache_event_map)
					[cache_type]
					[cache_op]
					[cache_result]);

	if (pev->cntr_mask == 0)
		return ERR_PTR(-EOPNOTSUPP);

	return pev;
}
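
/*
 * Check that a group of events can be scheduled at once by simulating
 * counter allocation into a scratch cpu_hw_events.
 */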
static int validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cpu_hw_events fake_cpuc;

	memset(&fake_cpuc, 0, sizeof(fake_cpuc));

	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
		return -EINVAL;

	for_each_sibling_event(sibling, leader) {
		if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
			return -EINVAL;
	}

	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
		return -EINVAL;

	return 0;
}

/* This is needed by specific irq handlers in perf_event_*.c */
static void handle_associated_event(struct cpu_hw_events *cpuc,
				    int idx, struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct perf_event *event = cpuc->events[idx];
	struct hw_perf_event *hwc = &event->hw;

	mipspmu_event_update(event, hwc, idx);
	data->period = event->hw.last_period;
	if (!mipspmu_event_set_period(event, hwc, idx))
		return;

	if (perf_event_overflow(event, data, regs))
		mipsxx_pmu_disable_event(idx);
}
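
/*
 * Probe the number of counters: the M bit (MIPS_PERFCTRL_M) of each
 * control register indicates whether another counter/control pair
 * follows it.
 */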
static int __n_counters(void)
{
	if (!cpu_has_perf)
		return 0;
	if (!(read_c0_perfctrl0() & MIPS_PERFCTRL_M))
		return 1;
	if (!(read_c0_perfctrl1() & MIPS_PERFCTRL_M))
		return 2;
	if (!(read_c0_perfctrl2() & MIPS_PERFCTRL_M))
		return 3;

	return 4;
}

static int n_counters(void)
{
	int counters;

	switch (current_cpu_type()) {
	case CPU_R10000:
		counters = 2;
		break;

	case CPU_R12000:
	case CPU_R14000:
	case CPU_R16000:
		counters = 4;
		break;

	default:
		counters = __n_counters();
	}

	return counters;
}

static void reset_counters(void *arg)
{
	int counters = (int)(long)arg;

	/* Each case deliberately falls through to clear the lower counters. */
	switch (counters) {
	case 4:
		mipsxx_pmu_write_control(3, 0);
		mipspmu.write_counter(3, 0);
		fallthrough;
	case 3:
		mipsxx_pmu_write_control(2, 0);
		mipspmu.write_counter(2, 0);
		fallthrough;
	case 2:
		mipsxx_pmu_write_control(1, 0);
		mipspmu.write_counter(1, 0);
		fallthrough;
	case 1:
		mipsxx_pmu_write_control(0, 0);
		mipspmu.write_counter(0, 0);
		break;
	}
}

/* 24K/34K/1004K/interAptiv/loongson1 cores share the same event map. */
static const struct mips_perf_event mipsxxcore_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
};

/* 74K/proAptiv core has different branch event code. */
static const struct mips_perf_event mipsxxcore_event_map2
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
};

static const struct mips_perf_event i6x00_event_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]          = { 0x00, CNTR_EVEN | CNTR_ODD },
	[PERF_COUNT_HW_INSTRUCTIONS]        = { 0x01, CNTR_EVEN | CNTR_ODD },
	/* These only count dcache, not icache */
	[PERF_COUNT_HW_CACHE_REFERENCES]    = { 0x45, CNTR_EVEN | CNTR_ODD },
	[PERF_COUNT_HW_CACHE_MISSES]        = { 0x48, CNTR_EVEN | CNTR_ODD },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x15, CNTR_EVEN | CNTR_ODD },
	[PERF_COUNT_HW_BRANCH_MISSES]       = { 0x16, CNTR_EVEN | CNTR_ODD },
};

static const struct mips_perf_event loongson3_event_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, CNTR_ODD },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x01, CNTR_EVEN },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x01, CNTR_ODD },
};

static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL },
	[PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL },
};

static const struct mips_perf_event bmips5000_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
};

static const struct mips_perf_event xlp_event_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x18, CNTR_ALL }, /* PAPI_TOT_INS */
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x1b, CNTR_ALL }, /* PAPI_BR_CN */
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x1c, CNTR_ALL }, /* PAPI_BR_MSP */
};

/* 24K/34K/1004K/interAptiv/loongson1 cores share the same cache event map. */
static const struct mips_perf_event mipsxxcore_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x14, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
},
};

/* 74K/proAptiv core has completely different cache event map. */
static const struct mips_perf_event mipsxxcore_cache_map2
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x34, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN, P },
	},
},
/*
 * 74K core does not have specific DTLB events. proAptiv core has
 * "speculative" DTLB events which are numbered 0x63 (even/odd) and
 * not included here. One can use raw events if really needed.
 */
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
	},
},
};

static const struct mips_perf_event i6x00_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x46, CNTR_EVEN | CNTR_ODD },
		[C(RESULT_MISS)]	= { 0x49, CNTR_EVEN | CNTR_ODD },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x47, CNTR_EVEN | CNTR_ODD },
		[C(RESULT_MISS)]	= { 0x4a, CNTR_EVEN | CNTR_ODD },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x84, CNTR_EVEN | CNTR_ODD },
		[C(RESULT_MISS)]	= { 0x85, CNTR_EVEN | CNTR_ODD },
	},
},
[C(DTLB)] = {
	/* Can't distinguish read & write */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x40, CNTR_EVEN | CNTR_ODD },
		[C(RESULT_MISS)]	= { 0x41, CNTR_EVEN | CNTR_ODD },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x40, CNTR_EVEN | CNTR_ODD },
		[C(RESULT_MISS)]	= { 0x41, CNTR_EVEN | CNTR_ODD },
	},
},
[C(BPU)] = {
	/* Conditional branches / mispredicted */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x15, CNTR_EVEN | CNTR_ODD },
		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN | CNTR_ODD },
	},
},
};

static const struct mips_perf_event loongson3_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x04, CNTR_EVEN },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x04, CNTR_EVEN },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x0c, CNTR_ODD },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x0c, CNTR_ODD },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD },
	},
},
};

static const struct mips_perf_event bmips5000_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 12, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 12, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 12, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 12, CNTR_ODD, T },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 10, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 10, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 10, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 10, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 23, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 28, CNTR_EVEN, P },
		[C(RESULT_MISS)]	= { 28, CNTR_ODD, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 28, CNTR_EVEN, P },
		[C(RESULT_MISS)]	= { 28, CNTR_ODD, P },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
},
};

static const struct mips_perf_event octeon_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x2b, CNTR_ALL },
		[C(RESULT_MISS)]	= { 0x2e, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x30, CNTR_ALL },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x18, CNTR_ALL },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x19, CNTR_ALL },
	},
},
[C(DTLB)] = {
	/*
	 * Only general DTLB misses are counted. Use the same event for
	 * read and write.
	 */
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x37, CNTR_ALL },
	},
},
};

static const struct mips_perf_event xlp_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x31, CNTR_ALL }, /* PAPI_L1_DCR */
		[C(RESULT_MISS)]	= { 0x30, CNTR_ALL }, /* PAPI_L1_LDM */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x2f, CNTR_ALL }, /* PAPI_L1_DCW */
		[C(RESULT_MISS)]	= { 0x2e, CNTR_ALL }, /* PAPI_L1_STM */
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
		[C(RESULT_MISS)]	= { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x35, CNTR_ALL }, /* PAPI_L2_DCR */
		[C(RESULT_MISS)]	= { 0x37, CNTR_ALL }, /* PAPI_L2_LDM */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x34, CNTR_ALL }, /* PAPI_L2_DCA */
		[C(RESULT_MISS)]	= { 0x36, CNTR_ALL }, /* PAPI_L2_DCM */
	},
},
[C(DTLB)] = {
	/*
	 * Only general DTLB misses are counted. Use the same event for
	 * read and write.
	 */
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x25, CNTR_ALL },
	},
},
};

static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	const struct mips_perf_event *pev;
	int err;

	/* Returning MIPS event descriptor for generic perf event. */
	if (PERF_TYPE_HARDWARE == event->attr.type) {
		if (event->attr.config >= PERF_COUNT_HW_MAX)
			return -EINVAL;
		pev = mipspmu_map_general_event(event->attr.config);
	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
		pev = mipspmu_map_cache_event(event->attr.config);
	} else if (PERF_TYPE_RAW == event->attr.type) {
		/* We are working on the global raw event. */
		mutex_lock(&raw_event_mutex);
		pev = mipspmu.map_raw_event(event->attr.config);
	} else {
		/* The event type is not (yet) supported. */
		return -EOPNOTSUPP;
	}

	if (IS_ERR(pev)) {
		if (PERF_TYPE_RAW == event->attr.type)
			mutex_unlock(&raw_event_mutex);
		return PTR_ERR(pev);
	}

	/*
	 * We allow max flexibility on how each individual counter shared
	 * by the single CPU operates (the mode exclusion and the range).
	 */
	hwc->config_base = MIPS_PERFCTRL_IE;

	hwc->event_base = mipspmu_perf_event_encode(pev);
	if (PERF_TYPE_RAW == event->attr.type)
		mutex_unlock(&raw_event_mutex);

	if (!attr->exclude_user)
		hwc->config_base |= MIPS_PERFCTRL_U;
	if (!attr->exclude_kernel) {
		hwc->config_base |= MIPS_PERFCTRL_K;
		/* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
		hwc->config_base |= MIPS_PERFCTRL_EXL;
	}
	if (!attr->exclude_hv)
		hwc->config_base |= MIPS_PERFCTRL_S;

	hwc->config_base &= M_PERFCTL_CONFIG_MASK;
	/*
	 * The event can belong to another cpu. We do not assign a local
	 * counter for it for now.
	 */
	hwc->idx = -1;
	hwc->config = 0;

	if (!hwc->sample_period) {
		hwc->sample_period	= mipspmu.max_period;
		hwc->last_period	= hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
	if (event->group_leader != event)
		err = validate_group(event);

	event->destroy = hw_perf_event_destroy;

	if (err)
		event->destroy(event);

	return err;
}
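
/*
 * Pause/resume all counters on the local CPU: save each control
 * register and clear the count-enable bits to stop counting; restore
 * the saved values to start again.
 */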
static void pause_local_counters(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int ctr = mipspmu.num_counters;
	unsigned long flags;

	local_irq_save(flags);
	do {
		ctr--;
		cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr);
		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
					 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
	} while (ctr > 0);
	local_irq_restore(flags);
}

static void resume_local_counters(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int ctr = mipspmu.num_counters;

	do {
		ctr--;
		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
	} while (ctr > 0);
}

static int mipsxx_pmu_handle_shared_irq(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct perf_sample_data data;
	unsigned int counters = mipspmu.num_counters;
	u64 counter;
	int handled = IRQ_NONE;
	struct pt_regs *regs;

	if (cpu_has_perf_cntr_intr_bit && !(read_c0_cause() & CAUSEF_PCI))
		return handled;
	/*
	 * First we pause the local counters, so that when we are locked
	 * here, the counters are all paused. When it gets locked due to
	 * perf_disable(), the timer interrupt handler will be delayed.
	 *
	 * See also mipspmu_start().
	 */
	pause_local_counters();
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	read_lock(&pmuint_rwlock);
#endif

	regs = get_irq_regs();

	perf_sample_data_init(&data, 0, 0);

	switch (counters) {
#define HANDLE_COUNTER(n)						\
	case n + 1:							\
		if (test_bit(n, cpuc->used_mask)) {			\
			counter = mipspmu.read_counter(n);		\
			if (counter & mipspmu.overflow) {		\
				handle_associated_event(cpuc, n, &data, regs); \
				handled = IRQ_HANDLED;			\
			}						\
		}
	HANDLE_COUNTER(3)
	HANDLE_COUNTER(2)
	HANDLE_COUNTER(1)
	HANDLE_COUNTER(0)
	}

#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	read_unlock(&pmuint_rwlock);
#endif
	resume_local_counters();

	/*
	 * Do all the work for the pending perf events. We can do this
	 * in here because the performance counter interrupt is a regular
	 * interrupt, not NMI.
	 */
	if (handled == IRQ_HANDLED)
		irq_work_run();

	return handled;
}

static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
{
	return mipsxx_pmu_handle_shared_irq();
}

/* 24K */
#define IS_BOTH_COUNTERS_24K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)

/* 34K */
#define IS_BOTH_COUNTERS_34K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_34K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 ||		\
	 (r) == 176 || ((b) >= 50 && (b) <= 55) ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_34K_EVENT(r)	((r) == 47)
#endif

/* 74K */
#define IS_BOTH_COUNTERS_74K_EVENT(b)					\
	((b) == 0 || (b) == 1)

/* proAptiv */
#define IS_BOTH_COUNTERS_PROAPTIV_EVENT(b)				\
	((b) == 0 || (b) == 1)
/* P5600 */
#define IS_BOTH_COUNTERS_P5600_EVENT(b)					\
	((b) == 0 || (b) == 1)

/* 1004K */
#define IS_BOTH_COUNTERS_1004K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_1004K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 ||		\
	 (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) ||	\
	 (r) == 188 || (b) == 61 || (b) == 62 ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_1004K_EVENT(r)	((r) == 47)
#endif

/* interAptiv */
#define IS_BOTH_COUNTERS_INTERAPTIV_EVENT(b)				\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
/* The P/V/T info is not provided for "(b) == 38" in SUM, assume P. */
#define IS_RANGE_P_INTERAPTIV_EVENT(r, b)				\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 36 || (b) == 38 || (b) == 39 ||		\
	 (r) == 44 || (r) == 174 || (r) == 176 || ((b) >= 50 &&		\
	 (b) <= 59) || (r) == 188 || (b) == 61 || (b) == 62 ||		\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_INTERAPTIV_EVENT(r)	((r) == 47 || (r) == 175)
#endif

/* BMIPS5000 */
#define IS_BOTH_COUNTERS_BMIPS5000_EVENT(b)				\
	((b) == 0 || (b) == 1)

/*
 * For most cores the user can use 0-255 raw events, where 0-127 are for
 * the events of even counters and 128-255 are for odd counters. Note that
 * bit 7 is used to indicate the even/odd bank selector. So, for example,
 * when a user wants event number 15 counted on an odd counter (by
 * referring to the user manual), 128 needs to be added to 15 as the input
 * for the event config, i.e. 143 (0x8F) is used.
 *
 * Some newer cores have even more events, in which case the user can use
 * raw events 0-511, where 0-255 are for the events of even counters and
 * 256-511 are for odd counters, so bit 8 is used to indicate the even/odd
 * bank selector.
 */
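
/*
 * With the perf tool such a raw event is typically requested with the
 * "rNN" event syntax, e.g. "perf stat -e r8f ..." for event 15 on an
 * odd counter.
 */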
static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
{
	/* currently most cores have 7-bit event numbers */
	unsigned int raw_id = config & 0xff;
	unsigned int base_id = raw_id & 0x7f;

	switch (current_cpu_type()) {
	case CPU_24K:
		if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		/*
		 * This is actually doing nothing. Non-multithreading
		 * CPUs will not check and calculate the range.
		 */
		raw_event.range = P;
#endif
		break;
	case CPU_34K:
		if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	case CPU_74K:
	case CPU_1074K:
		if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		raw_event.range = P;
#endif
		break;
	case CPU_PROAPTIV:
		if (IS_BOTH_COUNTERS_PROAPTIV_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		raw_event.range = P;
#endif
		break;
	case CPU_P5600:
	case CPU_P6600:
		/* 8-bit event numbers */
		raw_id = config & 0x1ff;
		base_id = raw_id & 0xff;
		if (IS_BOTH_COUNTERS_P5600_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 255 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		raw_event.range = P;
#endif
		break;
	case CPU_I6400:
	case CPU_I6500:
		/* 8-bit event numbers */
		base_id = config & 0xff;
		raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		break;
	case CPU_1004K:
		if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	case CPU_INTERAPTIV:
		if (IS_BOTH_COUNTERS_INTERAPTIV_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_INTERAPTIV_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_INTERAPTIV_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	case CPU_BMIPS5000:
		if (IS_BOTH_COUNTERS_BMIPS5000_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
		break;
	case CPU_LOONGSON3:
		raw_event.cntr_mask = raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
		break;
	}

	raw_event.event_id = base_id;

	return &raw_event;
}

static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
{
	unsigned int raw_id = config & 0xff;
	unsigned int base_id = raw_id & 0x7f;

	raw_event.cntr_mask = CNTR_ALL;
	raw_event.event_id = base_id;

	if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
		if (base_id > 0x42)
			return ERR_PTR(-EOPNOTSUPP);
	} else {
		if (base_id > 0x3a)
			return ERR_PTR(-EOPNOTSUPP);
	}

	/* These event numbers are reserved/unimplemented. */
	switch (base_id) {
	case 0x00:
	case 0x15:
	case 0x1d:
	case 0x1e:
	case 0x2d:
	case 0x2e:
	case 0x34:
	case 0x35:
	case 0x36:
	case 0x37:
	case 0x3a:
		return ERR_PTR(-EOPNOTSUPP);
	default:
		break;
	}

	return &raw_event;
}

static const struct mips_perf_event *xlp_pmu_map_raw_event(u64 config)
{
	unsigned int raw_id = config & 0xff;

	/* Only 1-63 are defined */
	if ((raw_id < 0x01) || (raw_id > 0x3f))
		return ERR_PTR(-EOPNOTSUPP);

	raw_event.cntr_mask = CNTR_ALL;
	raw_event.event_id = raw_id;

	return &raw_event;
}

static int __init
init_hw_perf_events(void)
{
	int counters, irq;
	int counter_bits;

	pr_info("Performance counters: ");

	counters = n_counters();
	if (counters == 0) {
		pr_cont("No available PMU.\n");
		return -ENODEV;
	}

#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	if (!cpu_has_mipsmt_pertccounters)
		counters = counters_total_to_per_cpu(counters);
#endif

	if (get_c0_perfcount_int)
		irq = get_c0_perfcount_int();
	else if (cp0_perfcount_irq >= 0)
		irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	else
		irq = -1;

	mipspmu.map_raw_event = mipsxx_pmu_map_raw_event;

	switch (current_cpu_type()) {
	case CPU_24K:
		mipspmu.name = "mips/24K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_34K:
		mipspmu.name = "mips/34K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_74K:
		mipspmu.name = "mips/74K";
		mipspmu.general_event_map = &mipsxxcore_event_map2;
		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
		break;
	case CPU_PROAPTIV:
		mipspmu.name = "mips/proAptiv";
		mipspmu.general_event_map = &mipsxxcore_event_map2;
		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
		break;
	case CPU_P5600:
		mipspmu.name = "mips/P5600";
		mipspmu.general_event_map = &mipsxxcore_event_map2;
		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
		break;
	case CPU_P6600:
		mipspmu.name = "mips/P6600";
		mipspmu.general_event_map = &mipsxxcore_event_map2;
		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
		break;
	case CPU_I6400:
		mipspmu.name = "mips/I6400";
		mipspmu.general_event_map = &i6x00_event_map;
		mipspmu.cache_event_map = &i6x00_cache_map;
		break;
	case CPU_I6500:
		mipspmu.name = "mips/I6500";
		mipspmu.general_event_map = &i6x00_event_map;
		mipspmu.cache_event_map = &i6x00_cache_map;
		break;
	case CPU_1004K:
		mipspmu.name = "mips/1004K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_1074K:
		mipspmu.name = "mips/1074K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_INTERAPTIV:
		mipspmu.name = "mips/interAptiv";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_LOONGSON1:
		mipspmu.name = "mips/loongson1";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_LOONGSON3:
		mipspmu.name = "mips/loongson3";
		mipspmu.general_event_map = &loongson3_event_map;
		mipspmu.cache_event_map = &loongson3_cache_map;
		break;
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
		mipspmu.name = "octeon";
		mipspmu.general_event_map = &octeon_event_map;
		mipspmu.cache_event_map = &octeon_cache_map;
		mipspmu.map_raw_event = octeon_pmu_map_raw_event;
		break;
	case CPU_BMIPS5000:
		mipspmu.name = "BMIPS5000";
		mipspmu.general_event_map = &bmips5000_event_map;
		mipspmu.cache_event_map = &bmips5000_cache_map;
		break;
	case CPU_XLP:
		mipspmu.name = "xlp";
		mipspmu.general_event_map = &xlp_event_map;
		mipspmu.cache_event_map = &xlp_cache_map;
		mipspmu.map_raw_event = xlp_pmu_map_raw_event;
		break;
	default:
		pr_cont("Either hardware does not support performance "
			"counters, or not yet implemented.\n");
		return -ENODEV;
	}

	mipspmu.num_counters = counters;
	mipspmu.irq = irq;

	if (read_c0_perfctrl0() & MIPS_PERFCTRL_W) {
		mipspmu.max_period = (1ULL << 63) - 1;
		mipspmu.valid_count = (1ULL << 63) - 1;
		mipspmu.overflow = 1ULL << 63;
		mipspmu.read_counter = mipsxx_pmu_read_counter_64;
		mipspmu.write_counter = mipsxx_pmu_write_counter_64;
		counter_bits = 64;
	} else {
		mipspmu.max_period = (1ULL << 31) - 1;
		mipspmu.valid_count = (1ULL << 31) - 1;
		mipspmu.overflow = 1ULL << 31;
		mipspmu.read_counter = mipsxx_pmu_read_counter;
		mipspmu.write_counter = mipsxx_pmu_write_counter;
		counter_bits = 32;
	}

	on_each_cpu(reset_counters, (void *)(long)counters, 1);

	pr_cont("%s PMU enabled, %d %d-bit counters available to each "
		"CPU, irq %d%s\n", mipspmu.name, counters, counter_bits, irq,
		irq < 0 ? " (share with timer interrupt)" : "");

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);

	return 0;
}
early_initcall(init_hw_perf_events);