include/linux/vtime.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KERNEL_VTIME_H
#define _LINUX_KERNEL_VTIME_H

#include <linux/context_tracking_state.h>
#include <linux/sched.h>

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#include <asm/vtime.h>
#endif

/*
 * Common vtime APIs
 */
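/*
 * vtime_account_kernel() and vtime_account_idle() come from the selected
 * accounting backend: architecture code under
 * CONFIG_VIRT_CPU_ACCOUNTING_NATIVE (e.g. powerpc, s390) or the generic,
 * context-tracking based code in kernel/sched/cputime.c under
 * CONFIG_VIRT_CPU_ACCOUNTING_GEN.
 */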
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void vtime_account_kernel(struct task_struct *tsk);
extern void vtime_account_idle(struct task_struct *tsk);
#endif /* CONFIG_VIRT_CPU_ACCOUNTING */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void arch_vtime_task_switch(struct task_struct *tsk);
extern void vtime_user_enter(struct task_struct *tsk);
extern void vtime_user_exit(struct task_struct *tsk);
extern void vtime_guest_enter(struct task_struct *tsk);
extern void vtime_guest_exit(struct task_struct *tsk);
extern void vtime_init_idle(struct task_struct *tsk, int cpu);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */
static inline void vtime_user_enter(struct task_struct *tsk) { }
static inline void vtime_user_exit(struct task_struct *tsk) { }
static inline void vtime_guest_enter(struct task_struct *tsk) { }
static inline void vtime_guest_exit(struct task_struct *tsk) { }
static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
#endif

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
extern void vtime_account_irq(struct task_struct *tsk, unsigned int offset);
extern void vtime_account_softirq(struct task_struct *tsk);
extern void vtime_account_hardirq(struct task_struct *tsk);
extern void vtime_flush(struct task_struct *tsk);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static inline void vtime_account_irq(struct task_struct *tsk, unsigned int offset) { }
static inline void vtime_account_softirq(struct task_struct *tsk) { }
static inline void vtime_account_hardirq(struct task_struct *tsk) { }
static inline void vtime_flush(struct task_struct *tsk) { }
#endif

/*
 * vtime_accounting_enabled_this_cpu() definitions/declarations
 */
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)

static inline bool vtime_accounting_enabled_this_cpu(void) { return true; }
extern void vtime_task_switch(struct task_struct *prev);

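/*
 * With native accounting always enabled, guest entry/exit only has to
 * flush the kernel cputime accumulated so far and toggle PF_VCPU, so
 * that time elapsed while the vCPU runs is charged as guest time
 * instead of system time.
 */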
static __always_inline void vtime_account_guest_enter(void)
{
        vtime_account_kernel(current);
        current->flags |= PF_VCPU;
}

static __always_inline void vtime_account_guest_exit(void)
{
        vtime_account_kernel(current);
        current->flags &= ~PF_VCPU;
}

#elif defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN)

/*
 * Checks if vtime is enabled on some CPU. Cputime readers want to be careful
 * in that case and compute the tickless cputime.
 * For now vtime state is tied to context tracking. We might want to decouple
 * those later if necessary.
 */
static inline bool vtime_accounting_enabled(void)
{
        return context_tracking_enabled();
}

static inline bool vtime_accounting_enabled_cpu(int cpu)
{
        return context_tracking_enabled_cpu(cpu);
}

static inline bool vtime_accounting_enabled_this_cpu(void)
{
        return context_tracking_enabled_this_cpu();
}
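
/*
 * Cputime readers (e.g. task_gtime() and task_cputime() in
 * kernel/sched/cputime.c) roughly follow this pattern:
 *
 *	if (!vtime_accounting_enabled())
 *		return the task's tick based counters;
 *	// otherwise sample t->vtime under its seqcount and add the
 *	// elapsed, not yet flushed delta to the stored counters.
 */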

extern void vtime_task_switch_generic(struct task_struct *prev);

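/*
 * Keep the task switch hook cheap when context tracking (and thus vtime)
 * is not running on this CPU: only the enabled check is paid here, the
 * real work stays in vtime_task_switch_generic().
 */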
static inline void vtime_task_switch(struct task_struct *prev)
{
        if (vtime_accounting_enabled_this_cpu())
                vtime_task_switch_generic(prev);
}

static __always_inline void vtime_account_guest_enter(void)
{
        if (vtime_accounting_enabled_this_cpu())
                vtime_guest_enter(current);
        else
                current->flags |= PF_VCPU;
}

static __always_inline void vtime_account_guest_exit(void)
{
        if (vtime_accounting_enabled_this_cpu())
                vtime_guest_exit(current);
        else
                current->flags &= ~PF_VCPU;
}

#else /* !CONFIG_VIRT_CPU_ACCOUNTING */

static inline bool vtime_accounting_enabled_this_cpu(void) { return false; }
static inline void vtime_task_switch(struct task_struct *prev) { }

static __always_inline void vtime_account_guest_enter(void)
{
        current->flags |= PF_VCPU;
}

static __always_inline void vtime_account_guest_exit(void)
{
        current->flags &= ~PF_VCPU;
}

#endif
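
/*
 * vtime_account_guest_enter()/vtime_account_guest_exit() are meant to
 * bracket guest execution with IRQs disabled; in practice they are
 * reached through guest_enter_irqoff()/guest_exit_irqoff() (see
 * <linux/context_tracking.h>) from hypervisors such as KVM, e.g.:
 *
 *	guest_enter_irqoff();	// flush kernel time, set PF_VCPU
 *	... run the vCPU ...
 *	guest_exit_irqoff();	// flush guest time, clear PF_VCPU
 */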


#ifdef CONFIG_IRQ_TIME_ACCOUNTING
extern void irqtime_account_irq(struct task_struct *tsk, unsigned int offset);
#else
static inline void irqtime_account_irq(struct task_struct *tsk, unsigned int offset) { }
#endif

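/*
 * Combined helpers for the IRQ and softirq entry/exit paths: they feed
 * both vtime (precise cputime) and irqtime (CONFIG_IRQ_TIME_ACCOUNTING)
 * accounting in one call. The offset (SOFTIRQ_OFFSET or HARDIRQ_OFFSET,
 * see <linux/preempt.h>) identifies the context being entered; the exit
 * helpers pass 0.
 */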
static inline void account_softirq_enter(struct task_struct *tsk)
{
        vtime_account_irq(tsk, SOFTIRQ_OFFSET);
        irqtime_account_irq(tsk, SOFTIRQ_OFFSET);
}

static inline void account_softirq_exit(struct task_struct *tsk)
{
        vtime_account_softirq(tsk);
        irqtime_account_irq(tsk, 0);
}

static inline void account_hardirq_enter(struct task_struct *tsk)
{
        vtime_account_irq(tsk, HARDIRQ_OFFSET);
        irqtime_account_irq(tsk, HARDIRQ_OFFSET);
}

static inline void account_hardirq_exit(struct task_struct *tsk)
{
        vtime_account_hardirq(tsk);
        irqtime_account_irq(tsk, 0);
}

#endif /* _LINUX_KERNEL_VTIME_H */