/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>

/* percpu_counter batch for local add or sub */
#define PERCPU_COUNTER_LOCAL_BATCH	INT_MAX
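
/*
 * Minimal usage sketch (hypothetical "nr_widgets" counter; error handling
 * trimmed). percpu_counter_read() returns a cheap but possibly stale
 * snapshot, percpu_counter_sum() the accurate but expensive total:
 *
 *	struct percpu_counter nr_widgets;
 *
 *	if (percpu_counter_init(&nr_widgets, 0, GFP_KERNEL))
 *		return -ENOMEM;
 *	percpu_counter_inc(&nr_widgets);
 *	pr_info("approx %lld, exact %lld\n",
 *		percpu_counter_read(&nr_widgets),
 *		percpu_counter_sum(&nr_widgets));
 *	percpu_counter_destroy(&nr_widgets);
 */
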
#ifdef CONFIG_SMP

struct percpu_counter {
	raw_spinlock_t lock;
	s64 count;
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;
};

extern int percpu_counter_batch;

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key);

#define percpu_counter_init(fbc, value, gfp)				\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init(fbc, value, gfp, &__key);		\
	})
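
/*
 * Note: percpu_counter_init() is a macro rather than a function so that
 * each call site gets its own static lock_class_key, i.e. every init
 * site forms a distinct lockdep class for the counter's internal lock.
 */
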
void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
			      s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
s64 percpu_counter_sum_all(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
void percpu_counter_sync(struct percpu_counter *fbc);

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
}
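
/*
 * The snapshot in fbc->count may deviate from the true value by up to
 * roughly batch * num_online_cpus(), because each CPU can hold almost a
 * full batch locally. A hypothetical caller checking a limit, e.g.
 *
 *	if (percpu_counter_compare(&fbc, limit) >= 0)
 *		reject();
 *
 * only pays for the precise percpu_counter_sum() when the snapshot falls
 * within that error bound of 'limit' (see lib/percpu_counter.c).
 */
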
static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}
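
/*
 * Worked example of the batching (assuming unit increments and a batch
 * of 32 on a 4-CPU machine): each CPU accumulates up to 31 counts in its
 * local counter before folding them into fbc->count under the lock, so
 * percpu_counter_read() can lag the true value by almost 4 * 32 = 128.
 */
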
/*
 * With percpu_counter_add_local() and percpu_counter_sub_local(), counts
 * are accumulated in the local per-CPU counter and are not flushed to
 * fbc->count until the local count overflows PERCPU_COUNTER_LOCAL_BATCH.
 * This makes counter writes efficient.
 * But percpu_counter_sum(), instead of percpu_counter_read(), needs to be
 * used to add up the counts from each CPU to account for all the local
 * counts. So percpu_counter_add_local() and percpu_counter_sub_local()
 * should be used when a counter is updated frequently and read rarely.
 */
static inline void
percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_batch(fbc, amount, PERCPU_COUNTER_LOCAL_BATCH);
}
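
/*
 * Sketch of the local variant (hypothetical "nr_events" counter): the
 * write side stays CPU-local, so readers must use the full sum:
 *
 *	percpu_counter_add_local(&nr_events, 1);
 *	...
 *	total = percpu_counter_sum(&nr_events);
 */
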
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	s64 ret = __percpu_counter_sum(fbc);
	return ret < 0 ? 0 : ret;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * It is possible for percpu_counter_read() to return a small negative
 * number for a counter that should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	/* Prevent reloads of fbc->count */
	s64 ret = READ_ONCE(fbc->count);

	if (ret >= 0)
		return ret;
	return 0;
}
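
/*
 * Why the clamp above can trigger: increments may still be parked in one
 * CPU's local counter while another CPU's accumulated decrements have
 * already been folded into fbc->count, so the snapshot can dip below
 * zero even though the counter's logical value never does.
 */
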
static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
	return (fbc->counters != NULL);
}

#else /* !CONFIG_SMP */

struct percpu_counter {
	s64 count;
};

static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
				      gfp_t gfp)
{
	fbc->count = amount;
	return 0;
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	if (fbc->count > rhs)
		return 1;
	else if (fbc->count < rhs)
		return -1;
	else
		return 0;
}

static inline int
__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	return percpu_counter_compare(fbc, rhs);
}

static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}

/* non-SMP percpu_counter_add_local is the same as percpu_counter_add */
static inline void
percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, amount);
}

static inline void
percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	percpu_counter_add(fbc, amount);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * percpu_counter is intended to track positive numbers. In the UP case the
 * number should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}

static inline s64 percpu_counter_sum_all(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}

static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
	return true;
}

static inline void percpu_counter_sync(struct percpu_counter *fbc)
{
}

#endif	/* CONFIG_SMP */

static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}

static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}

static inline void
percpu_counter_sub_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_local(fbc, -amount);
}

#endif /* _LINUX_PERCPU_COUNTER_H */