1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
4 #include <linux/errno.h>
5 #include <linux/interrupt.h>
6 #include <linux/module.h>
8 #include <linux/perf_event.h>
9 #include <linux/platform_device.h>
11 #define CSKY_PMU_MAX_EVENTS 32
13 #define HPCR "<0, 0x0>" /* PMU Control reg */
14 #define HPCNTENR "<0, 0x4>" /* Count Enable reg */
16 static uint64_t (*hw_raw_read_mapping[CSKY_PMU_MAX_EVENTS])(void);
17 static void (*hw_raw_write_mapping[CSKY_PMU_MAX_EVENTS])(uint64_t val);
/*
 * Raw accessors for the C-SKY PMU coprocessor (cp0) registers.
 *
 *   cprgr/cpwgr — read/write a *general* (counter) register
 *   cprcr/cpwcr — read/write a *control* register (HPCR, HPCNTENR)
 *
 * `reg` must be a string literal of the form "<cpid, regid>" so it can be
 * pasted directly into the instruction mnemonic.  The "memory" clobber
 * orders the access against surrounding loads/stores.
 *
 * NOTE(review): bodies reconstructed from a truncated listing — verify
 * the exact asm constraints against the upstream source.
 */
#define cprgr(reg)				\
({						\
	unsigned int tmp;			\
	asm volatile("cprgr %0, "reg"\n"	\
		     : "=r"(tmp)		\
		     :				\
		     : "memory");		\
	tmp;					\
})

#define cpwgr(reg, val)		\
({				\
	asm volatile(		\
	"cpwgr %0, "reg"\n"	\
	:			\
	: "r"(val)		\
	: "memory");		\
})

#define cprcr(reg)				\
({						\
	unsigned int tmp;			\
	asm volatile("cprcr %0, "reg"\n"	\
		     : "=r"(tmp)		\
		     :				\
		     : "memory");		\
	tmp;					\
})

#define cpwcr(reg, val)		\
({				\
	asm volatile(		\
	"cpwcr %0, "reg"\n"	\
	:			\
	: "r"(val)		\
	: "memory");		\
})
63 static uint64_t csky_pmu_read_cc(void)
69 tmp = cprgr("<0, 0x3>");
70 lo = cprgr("<0, 0x2>");
71 hi = cprgr("<0, 0x3>");
74 result = (uint64_t) (hi) << 32;
80 static void csky_pmu_write_cc(uint64_t val)
82 cpwgr("<0, 0x2>", (uint32_t) val);
83 cpwgr("<0, 0x3>", (uint32_t) (val >> 32));
86 /* instruction counter */
87 static uint64_t csky_pmu_read_ic(void)
93 tmp = cprgr("<0, 0x5>");
94 lo = cprgr("<0, 0x4>");
95 hi = cprgr("<0, 0x5>");
98 result = (uint64_t) (hi) << 32;
104 static void csky_pmu_write_ic(uint64_t val)
106 cpwgr("<0, 0x4>", (uint32_t) val);
107 cpwgr("<0, 0x5>", (uint32_t) (val >> 32));
110 /* l1 icache access counter */
111 static uint64_t csky_pmu_read_icac(void)
113 uint32_t lo, hi, tmp;
117 tmp = cprgr("<0, 0x7>");
118 lo = cprgr("<0, 0x6>");
119 hi = cprgr("<0, 0x7>");
122 result = (uint64_t) (hi) << 32;
128 static void csky_pmu_write_icac(uint64_t val)
130 cpwgr("<0, 0x6>", (uint32_t) val);
131 cpwgr("<0, 0x7>", (uint32_t) (val >> 32));
134 /* l1 icache miss counter */
135 static uint64_t csky_pmu_read_icmc(void)
137 uint32_t lo, hi, tmp;
141 tmp = cprgr("<0, 0x9>");
142 lo = cprgr("<0, 0x8>");
143 hi = cprgr("<0, 0x9>");
146 result = (uint64_t) (hi) << 32;
152 static void csky_pmu_write_icmc(uint64_t val)
154 cpwgr("<0, 0x8>", (uint32_t) val);
155 cpwgr("<0, 0x9>", (uint32_t) (val >> 32));
158 /* l1 dcache access counter */
159 static uint64_t csky_pmu_read_dcac(void)
161 uint32_t lo, hi, tmp;
165 tmp = cprgr("<0, 0xb>");
166 lo = cprgr("<0, 0xa>");
167 hi = cprgr("<0, 0xb>");
170 result = (uint64_t) (hi) << 32;
176 static void csky_pmu_write_dcac(uint64_t val)
178 cpwgr("<0, 0xa>", (uint32_t) val);
179 cpwgr("<0, 0xb>", (uint32_t) (val >> 32));
182 /* l1 dcache miss counter */
183 static uint64_t csky_pmu_read_dcmc(void)
185 uint32_t lo, hi, tmp;
189 tmp = cprgr("<0, 0xd>");
190 lo = cprgr("<0, 0xc>");
191 hi = cprgr("<0, 0xd>");
194 result = (uint64_t) (hi) << 32;
200 static void csky_pmu_write_dcmc(uint64_t val)
202 cpwgr("<0, 0xc>", (uint32_t) val);
203 cpwgr("<0, 0xd>", (uint32_t) (val >> 32));
206 /* l2 cache access counter */
207 static uint64_t csky_pmu_read_l2ac(void)
209 uint32_t lo, hi, tmp;
213 tmp = cprgr("<0, 0xf>");
214 lo = cprgr("<0, 0xe>");
215 hi = cprgr("<0, 0xf>");
218 result = (uint64_t) (hi) << 32;
224 static void csky_pmu_write_l2ac(uint64_t val)
226 cpwgr("<0, 0xe>", (uint32_t) val);
227 cpwgr("<0, 0xf>", (uint32_t) (val >> 32));
230 /* l2 cache miss counter */
231 static uint64_t csky_pmu_read_l2mc(void)
233 uint32_t lo, hi, tmp;
237 tmp = cprgr("<0, 0x11>");
238 lo = cprgr("<0, 0x10>");
239 hi = cprgr("<0, 0x11>");
242 result = (uint64_t) (hi) << 32;
248 static void csky_pmu_write_l2mc(uint64_t val)
250 cpwgr("<0, 0x10>", (uint32_t) val);
251 cpwgr("<0, 0x11>", (uint32_t) (val >> 32));
254 /* I-UTLB miss counter */
255 static uint64_t csky_pmu_read_iutlbmc(void)
257 uint32_t lo, hi, tmp;
261 tmp = cprgr("<0, 0x15>");
262 lo = cprgr("<0, 0x14>");
263 hi = cprgr("<0, 0x15>");
266 result = (uint64_t) (hi) << 32;
272 static void csky_pmu_write_iutlbmc(uint64_t val)
274 cpwgr("<0, 0x14>", (uint32_t) val);
275 cpwgr("<0, 0x15>", (uint32_t) (val >> 32));
278 /* D-UTLB miss counter */
279 static uint64_t csky_pmu_read_dutlbmc(void)
281 uint32_t lo, hi, tmp;
285 tmp = cprgr("<0, 0x17>");
286 lo = cprgr("<0, 0x16>");
287 hi = cprgr("<0, 0x17>");
290 result = (uint64_t) (hi) << 32;
296 static void csky_pmu_write_dutlbmc(uint64_t val)
298 cpwgr("<0, 0x16>", (uint32_t) val);
299 cpwgr("<0, 0x17>", (uint32_t) (val >> 32));
302 /* JTLB miss counter */
303 static uint64_t csky_pmu_read_jtlbmc(void)
305 uint32_t lo, hi, tmp;
309 tmp = cprgr("<0, 0x19>");
310 lo = cprgr("<0, 0x18>");
311 hi = cprgr("<0, 0x19>");
314 result = (uint64_t) (hi) << 32;
320 static void csky_pmu_write_jtlbmc(uint64_t val)
322 cpwgr("<0, 0x18>", (uint32_t) val);
323 cpwgr("<0, 0x19>", (uint32_t) (val >> 32));
326 /* software counter */
327 static uint64_t csky_pmu_read_softc(void)
329 uint32_t lo, hi, tmp;
333 tmp = cprgr("<0, 0x1b>");
334 lo = cprgr("<0, 0x1a>");
335 hi = cprgr("<0, 0x1b>");
338 result = (uint64_t) (hi) << 32;
344 static void csky_pmu_write_softc(uint64_t val)
346 cpwgr("<0, 0x1a>", (uint32_t) val);
347 cpwgr("<0, 0x1b>", (uint32_t) (val >> 32));
350 /* conditional branch mispredict counter */
351 static uint64_t csky_pmu_read_cbmc(void)
353 uint32_t lo, hi, tmp;
357 tmp = cprgr("<0, 0x1d>");
358 lo = cprgr("<0, 0x1c>");
359 hi = cprgr("<0, 0x1d>");
362 result = (uint64_t) (hi) << 32;
368 static void csky_pmu_write_cbmc(uint64_t val)
370 cpwgr("<0, 0x1c>", (uint32_t) val);
371 cpwgr("<0, 0x1d>", (uint32_t) (val >> 32));
374 /* conditional branch instruction counter */
375 static uint64_t csky_pmu_read_cbic(void)
377 uint32_t lo, hi, tmp;
381 tmp = cprgr("<0, 0x1f>");
382 lo = cprgr("<0, 0x1e>");
383 hi = cprgr("<0, 0x1f>");
386 result = (uint64_t) (hi) << 32;
392 static void csky_pmu_write_cbic(uint64_t val)
394 cpwgr("<0, 0x1e>", (uint32_t) val);
395 cpwgr("<0, 0x1f>", (uint32_t) (val >> 32));
398 /* indirect branch mispredict counter */
399 static uint64_t csky_pmu_read_ibmc(void)
401 uint32_t lo, hi, tmp;
405 tmp = cprgr("<0, 0x21>");
406 lo = cprgr("<0, 0x20>");
407 hi = cprgr("<0, 0x21>");
410 result = (uint64_t) (hi) << 32;
416 static void csky_pmu_write_ibmc(uint64_t val)
418 cpwgr("<0, 0x20>", (uint32_t) val);
419 cpwgr("<0, 0x21>", (uint32_t) (val >> 32));
422 /* indirect branch instruction counter */
423 static uint64_t csky_pmu_read_ibic(void)
425 uint32_t lo, hi, tmp;
429 tmp = cprgr("<0, 0x23>");
430 lo = cprgr("<0, 0x22>");
431 hi = cprgr("<0, 0x23>");
434 result = (uint64_t) (hi) << 32;
440 static void csky_pmu_write_ibic(uint64_t val)
442 cpwgr("<0, 0x22>", (uint32_t) val);
443 cpwgr("<0, 0x23>", (uint32_t) (val >> 32));
446 /* LSU spec fail counter */
447 static uint64_t csky_pmu_read_lsfc(void)
449 uint32_t lo, hi, tmp;
453 tmp = cprgr("<0, 0x25>");
454 lo = cprgr("<0, 0x24>");
455 hi = cprgr("<0, 0x25>");
458 result = (uint64_t) (hi) << 32;
464 static void csky_pmu_write_lsfc(uint64_t val)
466 cpwgr("<0, 0x24>", (uint32_t) val);
467 cpwgr("<0, 0x25>", (uint32_t) (val >> 32));
470 /* store instruction counter */
471 static uint64_t csky_pmu_read_sic(void)
473 uint32_t lo, hi, tmp;
477 tmp = cprgr("<0, 0x27>");
478 lo = cprgr("<0, 0x26>");
479 hi = cprgr("<0, 0x27>");
482 result = (uint64_t) (hi) << 32;
488 static void csky_pmu_write_sic(uint64_t val)
490 cpwgr("<0, 0x26>", (uint32_t) val);
491 cpwgr("<0, 0x27>", (uint32_t) (val >> 32));
494 /* dcache read access counter */
495 static uint64_t csky_pmu_read_dcrac(void)
497 uint32_t lo, hi, tmp;
501 tmp = cprgr("<0, 0x29>");
502 lo = cprgr("<0, 0x28>");
503 hi = cprgr("<0, 0x29>");
506 result = (uint64_t) (hi) << 32;
512 static void csky_pmu_write_dcrac(uint64_t val)
514 cpwgr("<0, 0x28>", (uint32_t) val);
515 cpwgr("<0, 0x29>", (uint32_t) (val >> 32));
518 /* dcache read miss counter */
519 static uint64_t csky_pmu_read_dcrmc(void)
521 uint32_t lo, hi, tmp;
525 tmp = cprgr("<0, 0x2b>");
526 lo = cprgr("<0, 0x2a>");
527 hi = cprgr("<0, 0x2b>");
530 result = (uint64_t) (hi) << 32;
536 static void csky_pmu_write_dcrmc(uint64_t val)
538 cpwgr("<0, 0x2a>", (uint32_t) val);
539 cpwgr("<0, 0x2b>", (uint32_t) (val >> 32));
542 /* dcache write access counter */
543 static uint64_t csky_pmu_read_dcwac(void)
545 uint32_t lo, hi, tmp;
549 tmp = cprgr("<0, 0x2d>");
550 lo = cprgr("<0, 0x2c>");
551 hi = cprgr("<0, 0x2d>");
554 result = (uint64_t) (hi) << 32;
560 static void csky_pmu_write_dcwac(uint64_t val)
562 cpwgr("<0, 0x2c>", (uint32_t) val);
563 cpwgr("<0, 0x2d>", (uint32_t) (val >> 32));
566 /* dcache write miss counter */
567 static uint64_t csky_pmu_read_dcwmc(void)
569 uint32_t lo, hi, tmp;
573 tmp = cprgr("<0, 0x2f>");
574 lo = cprgr("<0, 0x2e>");
575 hi = cprgr("<0, 0x2f>");
578 result = (uint64_t) (hi) << 32;
584 static void csky_pmu_write_dcwmc(uint64_t val)
586 cpwgr("<0, 0x2e>", (uint32_t) val);
587 cpwgr("<0, 0x2f>", (uint32_t) (val >> 32));
590 /* l2cache read access counter */
591 static uint64_t csky_pmu_read_l2rac(void)
593 uint32_t lo, hi, tmp;
597 tmp = cprgr("<0, 0x31>");
598 lo = cprgr("<0, 0x30>");
599 hi = cprgr("<0, 0x31>");
602 result = (uint64_t) (hi) << 32;
608 static void csky_pmu_write_l2rac(uint64_t val)
610 cpwgr("<0, 0x30>", (uint32_t) val);
611 cpwgr("<0, 0x31>", (uint32_t) (val >> 32));
614 /* l2cache read miss counter */
615 static uint64_t csky_pmu_read_l2rmc(void)
617 uint32_t lo, hi, tmp;
621 tmp = cprgr("<0, 0x33>");
622 lo = cprgr("<0, 0x32>");
623 hi = cprgr("<0, 0x33>");
626 result = (uint64_t) (hi) << 32;
632 static void csky_pmu_write_l2rmc(uint64_t val)
634 cpwgr("<0, 0x32>", (uint32_t) val);
635 cpwgr("<0, 0x33>", (uint32_t) (val >> 32));
638 /* l2cache write access counter */
639 static uint64_t csky_pmu_read_l2wac(void)
641 uint32_t lo, hi, tmp;
645 tmp = cprgr("<0, 0x35>");
646 lo = cprgr("<0, 0x34>");
647 hi = cprgr("<0, 0x35>");
650 result = (uint64_t) (hi) << 32;
656 static void csky_pmu_write_l2wac(uint64_t val)
658 cpwgr("<0, 0x34>", (uint32_t) val);
659 cpwgr("<0, 0x35>", (uint32_t) (val >> 32));
662 /* l2cache write miss counter */
663 static uint64_t csky_pmu_read_l2wmc(void)
665 uint32_t lo, hi, tmp;
669 tmp = cprgr("<0, 0x37>");
670 lo = cprgr("<0, 0x36>");
671 hi = cprgr("<0, 0x37>");
674 result = (uint64_t) (hi) << 32;
680 static void csky_pmu_write_l2wmc(uint64_t val)
682 cpwgr("<0, 0x36>", (uint32_t) val);
683 cpwgr("<0, 0x37>", (uint32_t) (val >> 32));
686 #define HW_OP_UNSUPPORTED 0xffff
687 static const int csky_pmu_hw_map[PERF_COUNT_HW_MAX] = {
688 [PERF_COUNT_HW_CPU_CYCLES] = 0x1,
689 [PERF_COUNT_HW_INSTRUCTIONS] = 0x2,
690 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
691 [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
692 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0xf,
693 [PERF_COUNT_HW_BRANCH_MISSES] = 0xe,
694 [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
695 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
696 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
697 [PERF_COUNT_HW_REF_CPU_CYCLES] = HW_OP_UNSUPPORTED,
700 #define C(_x) PERF_COUNT_HW_CACHE_##_x
701 #define CACHE_OP_UNSUPPORTED 0xffff
702 static const int csky_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
705 [C(RESULT_ACCESS)] = 0x14,
706 [C(RESULT_MISS)] = 0x15,
709 [C(RESULT_ACCESS)] = 0x16,
710 [C(RESULT_MISS)] = 0x17,
713 [C(RESULT_ACCESS)] = 0x5,
714 [C(RESULT_MISS)] = 0x6,
719 [C(RESULT_ACCESS)] = 0x3,
720 [C(RESULT_MISS)] = 0x4,
723 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
724 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
727 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
728 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
733 [C(RESULT_ACCESS)] = 0x18,
734 [C(RESULT_MISS)] = 0x19,
737 [C(RESULT_ACCESS)] = 0x1a,
738 [C(RESULT_MISS)] = 0x1b,
741 [C(RESULT_ACCESS)] = 0x7,
742 [C(RESULT_MISS)] = 0x8,
747 [C(RESULT_ACCESS)] = 0x5,
748 [C(RESULT_MISS)] = 0xb,
751 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
752 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
755 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
756 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
761 [C(RESULT_ACCESS)] = 0x3,
762 [C(RESULT_MISS)] = 0xa,
765 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
766 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
769 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
770 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
775 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
776 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
779 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
780 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
783 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
784 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
789 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
790 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
793 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
794 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
797 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
798 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
803 static void csky_perf_event_update(struct perf_event *event,
804 struct hw_perf_event *hwc)
806 uint64_t prev_raw_count = local64_read(&hwc->prev_count);
807 uint64_t new_raw_count = hw_raw_read_mapping[hwc->idx]();
808 int64_t delta = new_raw_count - prev_raw_count;
811 * We aren't afraid of hwc->prev_count changing beneath our feet
812 * because there's no way for us to re-enter this function anytime.
814 local64_set(&hwc->prev_count, new_raw_count);
815 local64_add(delta, &event->count);
816 local64_sub(delta, &hwc->period_left);
819 static void csky_pmu_read(struct perf_event *event)
821 csky_perf_event_update(event, &event->hw);
824 static int csky_pmu_cache_event(u64 config)
826 unsigned int cache_type, cache_op, cache_result;
828 cache_type = (config >> 0) & 0xff;
829 cache_op = (config >> 8) & 0xff;
830 cache_result = (config >> 16) & 0xff;
832 if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
834 if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
836 if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
839 return csky_pmu_cache_map[cache_type][cache_op][cache_result];
842 static int csky_pmu_event_init(struct perf_event *event)
844 struct hw_perf_event *hwc = &event->hw;
847 if (event->attr.exclude_user)
848 csky_pmu.hpcr = BIT(2);
849 else if (event->attr.exclude_kernel)
850 csky_pmu.hpcr = BIT(3);
852 csky_pmu.hpcr = BIT(2) | BIT(3);
854 csky_pmu.hpcr |= BIT(1) | BIT(0);
856 switch (event->attr.type) {
857 case PERF_TYPE_HARDWARE:
858 if (event->attr.config >= PERF_COUNT_HW_MAX)
860 ret = csky_pmu_hw_map[event->attr.config];
861 if (ret == HW_OP_UNSUPPORTED)
865 case PERF_TYPE_HW_CACHE:
866 ret = csky_pmu_cache_event(event->attr.config);
867 if (ret == CACHE_OP_UNSUPPORTED)
872 if (hw_raw_read_mapping[event->attr.config] == NULL)
874 hwc->idx = event->attr.config;
881 /* starts all counters */
882 static void csky_pmu_enable(struct pmu *pmu)
884 cpwcr(HPCR, csky_pmu.hpcr);
887 /* stops all counters */
888 static void csky_pmu_disable(struct pmu *pmu)
893 static void csky_pmu_start(struct perf_event *event, int flags)
895 struct hw_perf_event *hwc = &event->hw;
898 if (WARN_ON_ONCE(idx == -1))
901 if (flags & PERF_EF_RELOAD)
902 WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
906 cpwcr(HPCNTENR, BIT(idx) | cprcr(HPCNTENR));
909 static void csky_pmu_stop(struct perf_event *event, int flags)
911 struct hw_perf_event *hwc = &event->hw;
914 if (!(event->hw.state & PERF_HES_STOPPED)) {
915 cpwcr(HPCNTENR, ~BIT(idx) & cprcr(HPCNTENR));
916 event->hw.state |= PERF_HES_STOPPED;
919 if ((flags & PERF_EF_UPDATE) &&
920 !(event->hw.state & PERF_HES_UPTODATE)) {
921 csky_perf_event_update(event, &event->hw);
922 event->hw.state |= PERF_HES_UPTODATE;
926 static void csky_pmu_del(struct perf_event *event, int flags)
928 csky_pmu_stop(event, PERF_EF_UPDATE);
930 perf_event_update_userpage(event);
933 /* allocate hardware counter and optionally start counting */
934 static int csky_pmu_add(struct perf_event *event, int flags)
936 struct hw_perf_event *hwc = &event->hw;
938 local64_set(&hwc->prev_count, 0);
940 if (hw_raw_write_mapping[hwc->idx] != NULL)
941 hw_raw_write_mapping[hwc->idx](0);
943 hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
944 if (flags & PERF_EF_START)
945 csky_pmu_start(event, PERF_EF_RELOAD);
947 perf_event_update_userpage(event);
952 int __init init_hw_perf_events(void)
954 csky_pmu.pmu = (struct pmu) {
955 .pmu_enable = csky_pmu_enable,
956 .pmu_disable = csky_pmu_disable,
957 .event_init = csky_pmu_event_init,
960 .start = csky_pmu_start,
961 .stop = csky_pmu_stop,
962 .read = csky_pmu_read,
965 memset((void *)hw_raw_read_mapping, 0,
966 sizeof(hw_raw_read_mapping[CSKY_PMU_MAX_EVENTS]));
968 hw_raw_read_mapping[0x1] = csky_pmu_read_cc;
969 hw_raw_read_mapping[0x2] = csky_pmu_read_ic;
970 hw_raw_read_mapping[0x3] = csky_pmu_read_icac;
971 hw_raw_read_mapping[0x4] = csky_pmu_read_icmc;
972 hw_raw_read_mapping[0x5] = csky_pmu_read_dcac;
973 hw_raw_read_mapping[0x6] = csky_pmu_read_dcmc;
974 hw_raw_read_mapping[0x7] = csky_pmu_read_l2ac;
975 hw_raw_read_mapping[0x8] = csky_pmu_read_l2mc;
976 hw_raw_read_mapping[0xa] = csky_pmu_read_iutlbmc;
977 hw_raw_read_mapping[0xb] = csky_pmu_read_dutlbmc;
978 hw_raw_read_mapping[0xc] = csky_pmu_read_jtlbmc;
979 hw_raw_read_mapping[0xd] = csky_pmu_read_softc;
980 hw_raw_read_mapping[0xe] = csky_pmu_read_cbmc;
981 hw_raw_read_mapping[0xf] = csky_pmu_read_cbic;
982 hw_raw_read_mapping[0x10] = csky_pmu_read_ibmc;
983 hw_raw_read_mapping[0x11] = csky_pmu_read_ibic;
984 hw_raw_read_mapping[0x12] = csky_pmu_read_lsfc;
985 hw_raw_read_mapping[0x13] = csky_pmu_read_sic;
986 hw_raw_read_mapping[0x14] = csky_pmu_read_dcrac;
987 hw_raw_read_mapping[0x15] = csky_pmu_read_dcrmc;
988 hw_raw_read_mapping[0x16] = csky_pmu_read_dcwac;
989 hw_raw_read_mapping[0x17] = csky_pmu_read_dcwmc;
990 hw_raw_read_mapping[0x18] = csky_pmu_read_l2rac;
991 hw_raw_read_mapping[0x19] = csky_pmu_read_l2rmc;
992 hw_raw_read_mapping[0x1a] = csky_pmu_read_l2wac;
993 hw_raw_read_mapping[0x1b] = csky_pmu_read_l2wmc;
995 memset((void *)hw_raw_write_mapping, 0,
996 sizeof(hw_raw_write_mapping[CSKY_PMU_MAX_EVENTS]));
998 hw_raw_write_mapping[0x1] = csky_pmu_write_cc;
999 hw_raw_write_mapping[0x2] = csky_pmu_write_ic;
1000 hw_raw_write_mapping[0x3] = csky_pmu_write_icac;
1001 hw_raw_write_mapping[0x4] = csky_pmu_write_icmc;
1002 hw_raw_write_mapping[0x5] = csky_pmu_write_dcac;
1003 hw_raw_write_mapping[0x6] = csky_pmu_write_dcmc;
1004 hw_raw_write_mapping[0x7] = csky_pmu_write_l2ac;
1005 hw_raw_write_mapping[0x8] = csky_pmu_write_l2mc;
1006 hw_raw_write_mapping[0xa] = csky_pmu_write_iutlbmc;
1007 hw_raw_write_mapping[0xb] = csky_pmu_write_dutlbmc;
1008 hw_raw_write_mapping[0xc] = csky_pmu_write_jtlbmc;
1009 hw_raw_write_mapping[0xd] = csky_pmu_write_softc;
1010 hw_raw_write_mapping[0xe] = csky_pmu_write_cbmc;
1011 hw_raw_write_mapping[0xf] = csky_pmu_write_cbic;
1012 hw_raw_write_mapping[0x10] = csky_pmu_write_ibmc;
1013 hw_raw_write_mapping[0x11] = csky_pmu_write_ibic;
1014 hw_raw_write_mapping[0x12] = csky_pmu_write_lsfc;
1015 hw_raw_write_mapping[0x13] = csky_pmu_write_sic;
1016 hw_raw_write_mapping[0x14] = csky_pmu_write_dcrac;
1017 hw_raw_write_mapping[0x15] = csky_pmu_write_dcrmc;
1018 hw_raw_write_mapping[0x16] = csky_pmu_write_dcwac;
1019 hw_raw_write_mapping[0x17] = csky_pmu_write_dcwmc;
1020 hw_raw_write_mapping[0x18] = csky_pmu_write_l2rac;
1021 hw_raw_write_mapping[0x19] = csky_pmu_write_l2rmc;
1022 hw_raw_write_mapping[0x1a] = csky_pmu_write_l2wac;
1023 hw_raw_write_mapping[0x1b] = csky_pmu_write_l2wmc;
1025 csky_pmu.pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
1027 cpwcr(HPCR, BIT(31) | BIT(30) | BIT(1));
1029 return perf_pmu_register(&csky_pmu.pmu, "cpu", PERF_TYPE_RAW);
1031 arch_initcall(init_hw_perf_events);