/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H

#include <asm/param.h> /* for HZ */

unsigned long get_state_synchronize_rcu(void);
unsigned long start_poll_synchronize_rcu(void);
bool poll_state_synchronize_rcu(unsigned long oldstate);
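
/*
 * Example usage of the polled grace-period API (an illustrative sketch;
 * old_obj and its disposal are hypothetical, not part of this header):
 * start a grace period without blocking, then check later whether it
 * has completed before freeing:
 *
 *	unsigned long cookie = start_poll_synchronize_rcu();
 *	...
 *	if (poll_state_synchronize_rcu(cookie))
 *		kfree(old_obj);		// A full grace period has elapsed.
 */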

static inline void cond_synchronize_rcu(unsigned long oldstate)
{
	/*
	 * On a single CPU, any pre-existing readers have completed by
	 * the time a process-context caller gets here, so only the
	 * debug check remains.
	 */
	might_sleep();
}

static inline unsigned long start_poll_synchronize_rcu_expedited(void)
{
	return start_poll_synchronize_rcu();
}

static inline void cond_synchronize_rcu_expedited(unsigned long oldstate)
{
	cond_synchronize_rcu(oldstate);
}
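
/*
 * Example usage of the conditional API (an illustrative sketch;
 * do_update() is hypothetical): snapshot the grace-period state first,
 * then wait only if a full grace period has not already elapsed:
 *
 *	unsigned long cookie = get_state_synchronize_rcu();
 *	do_update();
 *	cond_synchronize_rcu(cookie);	// Blocks only if necessary.
 */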

extern void rcu_barrier(void);

static inline void synchronize_rcu_expedited(void)
{
	synchronize_rcu();
}
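
/*
 * Example update-side usage (an illustrative sketch; gp, lock, and the
 * structure involved are hypothetical): publish a new version, wait for
 * pre-existing readers, then free the old one. On this single-CPU build
 * the expedited form is identical to the normal one, because no other
 * CPU can be running a reader:
 *
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&lock));
 *	rcu_assign_pointer(gp, new);
 *	synchronize_rcu_expedited();
 *	kfree(old);
 */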

/*
 * Add one more declaration of kvfree() here. It is not
 * so straightforward to just include <linux/mm.h>, where
 * it is defined, because that include causes many compile
 * errors.
 */
extern void kvfree(const void *addr);

static inline void __kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	if (head) {
		call_rcu(head, func);
		return;
	}

	// kvfree_rcu(one_arg) call: "func" is really the pointer to be
	// freed, so wait for a grace period and free it directly.
	might_sleep();
	synchronize_rcu();
	kvfree((void *) func);
}

#ifdef CONFIG_KASAN_GENERIC
void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
#else
static inline void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	__kvfree_call_rcu(head, func);
}
#endif
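
/*
 * Example usage (an illustrative sketch; struct foo and its rhead member
 * are hypothetical): the two-argument kvfree_rcu() reaches this function
 * with a non-NULL rcu_head and never blocks, while the one-argument form
 * passes head == NULL and may block in synchronize_rcu():
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rhead;
 *	};
 *
 *	kvfree_rcu(old_foo, rhead);	// Non-blocking.
 *	kvfree_rcu(old_foo);		// May block.
 */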

void rcu_qs(void);

static inline void rcu_softirq_qs(void)
{
	rcu_qs();
}

#define rcu_note_context_switch(preempt) \
	do { \
		rcu_qs(); \
		rcu_tasks_qs(current, (preempt)); \
	} while (0)

static inline int rcu_needs_cpu(void)
{
	return 0;
}

/*
 * Take advantage of the fact that there is only one CPU, which
 * allows us to ignore virtualization-based context switches.
 */
static inline void rcu_virt_note_context_switch(int cpu) { }
static inline void rcu_cpu_stall_reset(void) { }
static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; }
static inline void rcu_irq_exit_check_preempt(void) { }
#define rcu_is_idle_cpu(cpu) \
	(is_idle_task(current) && !in_nmi() && !in_hardirq() && !in_serving_softirq())
static inline void exit_rcu(void) { }
static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
	return false;
}
static inline void rcu_preempt_deferred_qs(struct task_struct *t) { }
#ifdef CONFIG_SRCU
void rcu_scheduler_starting(void);
#else /* #ifdef CONFIG_SRCU */
static inline void rcu_scheduler_starting(void) { }
#endif /* #ifdef CONFIG_SRCU */
static inline void rcu_end_inkernel_boot(void) { }
static inline bool rcu_inkernel_boot_has_ended(void) { return true; }
static inline bool rcu_is_watching(void) { return true; }
static inline void rcu_momentary_dyntick_idle(void) { }
static inline void kfree_rcu_scheduler_running(void) { }
static inline bool rcu_gp_might_be_stalled(void) { return false; }

/* Avoid RCU read-side critical sections leaking across. */
static inline void rcu_all_qs(void) { barrier(); }

/* RCUtree hotplug events */
#define rcutree_prepare_cpu	NULL
#define rcutree_online_cpu	NULL
#define rcutree_offline_cpu	NULL
#define rcutree_dead_cpu	NULL
#define rcutree_dying_cpu	NULL
static inline void rcu_cpu_starting(unsigned int cpu) { }

#endif /* __LINUX_TINY_H */