// SPDX-License-Identifier: GPL-2.0
/*
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/debugobjects.h>

#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
static DEFINE_SPINLOCK(percpu_counters_lock);
#endif

#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER

static const struct debug_obj_descr percpu_counter_debug_descr;

static bool percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
{
        struct percpu_counter *fbc = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                percpu_counter_destroy(fbc);
                debug_object_free(fbc, &percpu_counter_debug_descr);
                return true;
        default:
                return false;
        }
}

static const struct debug_obj_descr percpu_counter_debug_descr = {
        .name           = "percpu_counter",
        .fixup_free     = percpu_counter_fixup_free,
};

static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
        debug_object_init(fbc, &percpu_counter_debug_descr);
        debug_object_activate(fbc, &percpu_counter_debug_descr);
}

static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{
        debug_object_deactivate(fbc, &percpu_counter_debug_descr);
        debug_object_free(fbc, &percpu_counter_debug_descr);
}

#else   /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{ }
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{ }
#endif  /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */

void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
        int cpu;
        unsigned long flags;

        raw_spin_lock_irqsave(&fbc->lock, flags);
        for_each_possible_cpu(cpu) {
                s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
                *pcount = 0;
        }
        fbc->count = amount;
        raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_set);

/*
 * This function is both preempt and irq safe. The former is due to explicit
 * preemption disable. The latter is guaranteed by the fact that the slow path
 * is explicitly protected by an irq-safe spinlock, whereas the fast path uses
 * this_cpu_add(), which is irq-safe by definition. Hence there is no need to
 * muck with irq state before calling this one.
 */
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
        s64 count;

        preempt_disable();
        count = __this_cpu_read(*fbc->counters) + amount;
        if (abs(count) >= batch) {
                unsigned long flags;
                raw_spin_lock_irqsave(&fbc->lock, flags);
                fbc->count += count;
                __this_cpu_sub(*fbc->counters, count - amount);
                raw_spin_unlock_irqrestore(&fbc->lock, flags);
        } else {
                this_cpu_add(*fbc->counters, amount);
        }
        preempt_enable();
}
EXPORT_SYMBOL(percpu_counter_add_batch);
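
/*
 * Illustrative sketch (not part of this file): how the fast and slow paths
 * above play out for a caller. The names `nr_widgets` and `widget_alloc()`
 * are hypothetical, invented for this example.
 *
 *      static struct percpu_counter nr_widgets;
 *
 *      static void widget_alloc(void)
 *      {
 *              // With batch = 32, increments accumulate in this CPU's
 *              // local s32 via this_cpu_add(); only once the local delta
 *              // reaches +/-32 is it folded into fbc->count under
 *              // fbc->lock (the slow path above).
 *              percpu_counter_add_batch(&nr_widgets, 1, 32);
 *      }
 */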

/*
 * For a percpu_counter with a big batch, the deviation of its count could
 * be big, and there may be a requirement to reduce that deviation, e.g. when
 * the counter's batch is decreased at runtime to get better accuracy. That
 * can be achieved by running this sync function on each CPU.
 */
void percpu_counter_sync(struct percpu_counter *fbc)
{
        unsigned long flags;
        s64 count;

        raw_spin_lock_irqsave(&fbc->lock, flags);
        count = __this_cpu_read(*fbc->counters);
        fbc->count += count;
        __this_cpu_sub(*fbc->counters, count);
        raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_sync);
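
/*
 * Illustrative sketch (not part of this file): since the function above
 * only folds in the *local* CPU's delta, a caller wanting a tighter
 * fbc->count would run it on every CPU, e.g. via on_each_cpu(). The
 * wrapper name is hypothetical.
 *
 *      static void sync_one(void *arg)
 *      {
 *              percpu_counter_sync(arg);
 *      }
 *
 *      // Fold every CPU's local delta into fbc->count, waiting for
 *      // completion on all CPUs.
 *      on_each_cpu(sync_one, &my_counter, 1);
 */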

static s64 __percpu_counter_sum_mask(struct percpu_counter *fbc,
                              const struct cpumask *cpu_mask)
{
        s64 ret;
        int cpu;
        unsigned long flags;

        raw_spin_lock_irqsave(&fbc->lock, flags);
        ret = fbc->count;
        for_each_cpu(cpu, cpu_mask) {
                s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
                ret += *pcount;
        }
        raw_spin_unlock_irqrestore(&fbc->lock, flags);
        return ret;
}

/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive().
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
        return __percpu_counter_sum_mask(fbc, cpu_online_mask);
}
EXPORT_SYMBOL(__percpu_counter_sum);
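
/*
 * Illustrative sketch (not part of this file): the trade-off between the
 * approximate and the precise readers, for a live counter `fbc` with
 * batch `b` on a machine with `n` CPUs online.
 *
 *      // O(1) and lockless, but may be off by up to roughly b * n,
 *      // since each online CPU can hold a local delta of up to +/-b.
 *      s64 rough = percpu_counter_read(fbc);
 *
 *      // O(n) under fbc->lock; folds in every online CPU's delta.
 *      s64 exact = percpu_counter_sum(fbc);
 */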

/*
 * This is a slower version of percpu_counter_sum(), as it traverses all
 * possible CPUs. Use this only in the cases where accurate data is needed
 * in the presence of CPUs getting offlined.
 */
s64 percpu_counter_sum_all(struct percpu_counter *fbc)
{
        return __percpu_counter_sum_mask(fbc, cpu_possible_mask);
}
EXPORT_SYMBOL(percpu_counter_sum_all);
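
/*
 * Note: the difference from __percpu_counter_sum() matters in the window
 * where a CPU has already left cpu_online_mask but percpu_counter_cpu_dead()
 * below has not yet folded that CPU's local delta into fbc->count.
 */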

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
                          struct lock_class_key *key)
{
        unsigned long flags __maybe_unused;

        raw_spin_lock_init(&fbc->lock);
        lockdep_set_class(&fbc->lock, key);
        fbc->count = amount;
        fbc->counters = alloc_percpu_gfp(s32, gfp);
        if (!fbc->counters)
                return -ENOMEM;

        debug_percpu_counter_activate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
        INIT_LIST_HEAD(&fbc->list);
        spin_lock_irqsave(&percpu_counters_lock, flags);
        list_add(&fbc->list, &percpu_counters);
        spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
        return 0;
}
EXPORT_SYMBOL(__percpu_counter_init);
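
/*
 * Illustrative sketch (not part of this file): the usual lifecycle as seen
 * from a caller. `my_counter` is hypothetical; percpu_counter_init() is the
 * wrapper from <linux/percpu_counter.h> that supplies the lockdep class key
 * for __percpu_counter_init().
 *
 *      static struct percpu_counter my_counter;
 *
 *      if (percpu_counter_init(&my_counter, 0, GFP_KERNEL))
 *              return -ENOMEM;
 *      percpu_counter_add(&my_counter, 42);
 *      ...
 *      percpu_counter_destroy(&my_counter);
 */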

void percpu_counter_destroy(struct percpu_counter *fbc)
{
        unsigned long flags __maybe_unused;

        if (!fbc->counters)
                return;

        debug_percpu_counter_deactivate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
        spin_lock_irqsave(&percpu_counters_lock, flags);
        list_del(&fbc->list);
        spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
        free_percpu(fbc->counters);
        fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);

int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);

static int compute_batch_value(unsigned int cpu)
{
        int nr = num_online_cpus();

        percpu_counter_batch = max(32, nr*2);
        return 0;
}
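
/*
 * Worked example (illustrative): on an 8-CPU box the batch stays at
 * max(32, 8 * 2) = 32, so scaling only kicks in past 16 online CPUs;
 * with 64 CPUs online the batch becomes 128. A bigger batch trades read
 * accuracy (the rough count may drift by up to batch * num_online_cpus())
 * for fewer acquisitions of fbc->lock on the write side.
 */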
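/*
 * Hotplug callback: when a CPU dies, fold its per-cpu delta into every
 * registered counter's central count so nothing is lost, and recompute
 * the global batch for the new number of online CPUs.
 */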
static int percpu_counter_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
        struct percpu_counter *fbc;

        compute_batch_value(cpu);

        spin_lock_irq(&percpu_counters_lock);
        list_for_each_entry(fbc, &percpu_counters, list) {
                s32 *pcount;

                raw_spin_lock(&fbc->lock);
                pcount = per_cpu_ptr(fbc->counters, cpu);
                fbc->count += *pcount;
                *pcount = 0;
                raw_spin_unlock(&fbc->lock);
        }
        spin_unlock_irq(&percpu_counters_lock);
#endif
        return 0;
}

/*
 * Compare counter against given value.
 * Return 1 if greater, 0 if equal, and -1 if less.
 */
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
        s64     count;

        count = percpu_counter_read(fbc);
        /* Check to see if rough count will be sufficient for comparison */
        if (abs(count - rhs) > (batch * num_online_cpus())) {
                if (count > rhs)
                        return 1;
                else
                        return -1;
        }
        /* Need to use precise count */
        count = percpu_counter_sum(fbc);
        if (count > rhs)
                return 1;
        else if (count < rhs)
                return -1;
        else
                return 0;
}
EXPORT_SYMBOL(__percpu_counter_compare);
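
/*
 * Illustrative sketch (not part of this file): a limit check, the typical
 * use of the comparison above. `my_counter` and `my_limit` are made up;
 * percpu_counter_compare() is the wrapper that passes the default
 * percpu_counter_batch to __percpu_counter_compare().
 *
 *      // Cheap when the rough count is far from the limit; falls back to
 *      // a precise percpu_counter_sum() only near the boundary.
 *      if (percpu_counter_compare(&my_counter, my_limit) >= 0)
 *              return -ENOSPC;
 */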

static int __init percpu_counter_startup(void)
{
        int ret;

        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "lib/percpu_cnt:online",
                                compute_batch_value, NULL);
        WARN_ON(ret < 0);
        ret = cpuhp_setup_state_nocalls(CPUHP_PERCPU_CNT_DEAD,
                                        "lib/percpu_cnt:dead", NULL,
                                        percpu_counter_cpu_dead);
        WARN_ON(ret < 0);
        return 0;
}
module_init(percpu_counter_startup);