/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */
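
/*
 * Illustrative usage sketch (hypothetical caller and counter name, error
 * handling simplified; only the percpu_counter_*() calls come from this
 * header):
 *
 *	struct percpu_counter nr_widgets;
 *	s64 approx, exact;
 *	int err;
 *
 *	err = percpu_counter_init(&nr_widgets, 0, GFP_KERNEL);
 *	if (err)
 *		return err;
 *
 *	percpu_counter_inc(&nr_widgets);	(cheap: per-CPU delta only)
 *	percpu_counter_add(&nr_widgets, 16);
 *
 *	approx = percpu_counter_read_positive(&nr_widgets);
 *	exact = percpu_counter_sum(&nr_widgets);	(slow: sums every CPU)
 *
 *	percpu_counter_destroy(&nr_widgets);
 */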

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/gfp.h>

#ifdef CONFIG_SMP

struct percpu_counter {
	raw_spinlock_t lock;
	s64 count;
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;
};

extern int percpu_counter_batch;
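
/*
 * Note (summary of the lib/percpu_counter.c behaviour, not a guarantee made
 * by this header): each CPU accumulates deltas in its local s32 counter and
 * folds them into fbc->count under the lock once they reach the batch size,
 * so percpu_counter_read() may be off by roughly batch * num_online_cpus().
 */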

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key);
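
/*
 * The wrapper below gives each percpu_counter_init() call site its own
 * static lock_class_key, so lockdep can tell the different counter users
 * apart instead of lumping them into a single lock class.
 */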
#define percpu_counter_init(fbc, value, gfp)				\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init(fbc, value, gfp, &__key);		\
	})

void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
			      s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
void percpu_counter_sync(struct percpu_counter *fbc);
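
/*
 * percpu_counter_compare() returns 1, 0 or -1 depending on whether the
 * counter is greater than, equal to or less than rhs. In the SMP
 * implementation (lib/percpu_counter.c) the approximate count is checked
 * first; an exact __percpu_counter_sum() is only done when the difference
 * is within the batch-induced error margin.
 */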

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
}

static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	s64 ret = __percpu_counter_sum(fbc);

	return ret < 0 ? 0 : ret;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * It is possible for percpu_counter_read() to return a small negative
 * number for some counter which should never be negative, because per-CPU
 * deltas that have not yet been folded into fbc->count may be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	/* Prevent reloads of fbc->count */
	s64 ret = READ_ONCE(fbc->count);

	if (ret >= 0)
		return ret;
	return 0;
}

static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
	return (fbc->counters != NULL);
}

#else /* !CONFIG_SMP */

struct percpu_counter {
	s64 count;
};

static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
				      gfp_t gfp)
{
	fbc->count = amount;
	return 0;
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	if (fbc->count > rhs)
		return 1;
	else if (fbc->count < rhs)
		return -1;
	else
		return 0;
}

static inline int
__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	return percpu_counter_compare(fbc, rhs);
}

static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}

static inline void
percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	percpu_counter_add(fbc, amount);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * percpu_counter is intended to track positive numbers. In the UP case the
 * number should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}

static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
	return true;
}

static inline void percpu_counter_sync(struct percpu_counter *fbc)
{
}

#endif	/* CONFIG_SMP */

static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}

static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}

#endif /* _LINUX_PERCPU_COUNTER_H */