/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 SiFive
 * Copyright (C) 2018 Andes Technology Corporation
 */
8 #ifndef _ASM_RISCV_PERF_EVENT_H
9 #define _ASM_RISCV_PERF_EVENT_H
11 #include <linux/perf_event.h>
12 #include <linux/ptrace.h>
14 #define RISCV_BASE_COUNTERS 2
/*
 * The RISCV_MAX_COUNTERS parameter should be specified.
 */
20 #ifdef CONFIG_RISCV_BASE_PMU
21 #define RISCV_MAX_COUNTERS 2
24 #ifndef RISCV_MAX_COUNTERS
25 #error "Please provide a valid RISCV_MAX_COUNTERS for the PMU."
/*
 * These are the indexes of bits in counteren register *minus* 1,
 * except for cycle. It would be coherent if it could be directly mapped
 * to the counteren bit definition, but there is a *time* register at
 * counteren[1]. Per-cpu structure is a scarce resource here.
 *
 * According to the spec, an implementation can support counters up to
 * mhpmcounter31, but many high-end processors have at most 6 general
 * PMCs, so we give definitions up to MHPMCOUNTER8 here.
 */
38 #define RISCV_PMU_CYCLE 0
39 #define RISCV_PMU_INSTRET 1
40 #define RISCV_PMU_MHPMCOUNTER3 2
41 #define RISCV_PMU_MHPMCOUNTER4 3
42 #define RISCV_PMU_MHPMCOUNTER5 4
43 #define RISCV_PMU_MHPMCOUNTER6 5
44 #define RISCV_PMU_MHPMCOUNTER7 6
45 #define RISCV_PMU_MHPMCOUNTER8 7
47 #define RISCV_OP_UNSUPP (-EOPNOTSUPP)
49 struct cpu_hw_events {
50 /* # currently enabled events*/
52 /* currently enabled events */
53 struct perf_event *events[RISCV_MAX_COUNTERS];
54 /* vendor-defined PMU data */
61 /* generic hw/cache events table */
63 const int (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
64 [PERF_COUNT_HW_CACHE_OP_MAX]
65 [PERF_COUNT_HW_CACHE_RESULT_MAX];
66 /* method used to map hw/cache events */
67 int (*map_hw_event)(u64 config);
68 int (*map_cache_event)(u64 config);
70 /* max generic hw events in map */
72 /* number total counters, 2(base) + x(general) */
74 /* the width of the counter */
77 /* vendor-defined PMU features */
80 irqreturn_t (*handle_irq)(int irq_num, void *dev);
84 #endif /* _ASM_RISCV_PERF_EVENT_H */