/*
 * include/kvm/arm_pmu.h
 * (from linux-2.6-microblaze.git, at merge of tag 'nfsd-5.10' of
 *  git://linux-nfs.org/~bfields/linux)
 */
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (C) 2015 Linaro Ltd.
4  * Author: Shannon Zhao <shannon.zhao@linaro.org>
5  */
6
7 #ifndef __ASM_ARM_KVM_PMU_H
8 #define __ASM_ARM_KVM_PMU_H
9
10 #include <linux/perf_event.h>
11 #include <asm/perf_event.h>
12
/*
 * The cycle counter is mapped at the highest counter index; the event
 * counters occupy indices 0..ARMV8_PMU_MAX_COUNTERS-2.
 */
#define ARMV8_PMU_CYCLE_IDX             (ARMV8_PMU_MAX_COUNTERS - 1)
/*
 * Number of adjacent counter pairs (for 64-bit chained counters);
 * the "+ 1" rounds up in case of an odd number of counters.
 */
#define ARMV8_PMU_MAX_COUNTER_PAIRS     ((ARMV8_PMU_MAX_COUNTERS + 1) >> 1)
15
16 #ifdef CONFIG_KVM_ARM_PMU
17
/*
 * State of one emulated PMU counter: its slot in the owning vcpu's
 * pmu->pmc[] array and the host perf event backing it.
 */
struct kvm_pmc {
        u8 idx; /* index into the pmu->pmc array */
        struct perf_event *perf_event; /* host perf event backing this counter */
};
22
/* Per-vcpu PMU emulation state (embedded in vcpu->arch.pmu). */
struct kvm_pmu {
        /*
         * Interrupt number used to signal the guest; considered
         * initialized once >= VGIC_NR_SGIS (see
         * kvm_arm_pmu_irq_initialized() below).
         */
        int irq_num;
        struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
        /* One bit per counter pair that is configured as a chained pair. */
        DECLARE_BITMAP(chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
        bool ready;     /* read by kvm_arm_pmu_v3_ready() */
        bool created;   /* set once the PMU device has been created */
        bool irq_level; /* cached level of the overflow interrupt line */
        /*
         * NOTE(review): presumably used to defer overflow interrupt
         * injection out of atomic context — confirm in virt/kvm/arm/pmu.c.
         */
        struct irq_work overflow_work;
};
32
/* True once the vcpu's PMU is fully set up and usable. */
#define kvm_arm_pmu_v3_ready(v)         ((v)->arch.pmu.ready)
/* True once a valid (non-SGI) overflow interrupt number has been chosen. */
#define kvm_arm_pmu_irq_initialized(v)  ((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
/* Counter value access (select_idx is the architectural counter index). */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
/* Bitmask of counter indices implemented for this vcpu. */
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
/* vcpu lifecycle hooks. */
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
/* Enable/disable counters named by the bitmask val (PMCNTEN semantics). */
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
/* Sync emulated PMU state with hardware around guest entry/exit. */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
/* Userspace notification of PMU interrupt level changes via kvm_run. */
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_pmu_update_run(struct kvm_vcpu *vcpu);
/* Trap handlers for guest writes to PMSWINC / PMCR / PMEVTYPER. */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
                                    u64 select_idx);
/* Host-side capability probe. */
bool kvm_arm_support_pmu_v3(void);
/* KVM_{SET,GET,HAS}_DEVICE_ATTR backends for the vcpu PMU device. */
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr);
/* Finalize PMU setup before the vcpu first runs. */
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
59 #else
/*
 * CONFIG_KVM_ARM_PMU is not set: provide an empty state struct and
 * inert stubs mirroring the API above, so callers compile unchanged.
 */
struct kvm_pmu {
};

#define kvm_arm_pmu_v3_ready(v)         (false)
#define kvm_arm_pmu_irq_initialized(v)  (false)
/* Counters read as zero and ignore writes when the PMU is compiled out. */
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
                                            u64 select_idx)
{
        return 0;
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
                                             u64 select_idx, u64 val) {}
static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
        return 0;
}
static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
/* Never anything to report to userspace. */
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
        return false;
}
static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
                                                  u64 data, u64 select_idx) {}
static inline bool kvm_arm_support_pmu_v3(void) { return false; }
/* Device attribute handlers report "no such device attribute". */
static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
                                          struct kvm_device_attr *attr)
{
        return -ENXIO;
}
static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
                                          struct kvm_device_attr *attr)
{
        return -ENXIO;
}
static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
                                          struct kvm_device_attr *attr)
{
        return -ENXIO;
}
/* Nothing to enable; succeed so vcpu setup proceeds. */
static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
        return 0;
}
112 #endif
113
114 #endif