/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Fast and scalable bitmaps.
 *
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#ifndef __LINUX_SCALE_BITMAP_H
#define __LINUX_SCALE_BITMAP_H

#include <linux/kernel.h>
#include <linux/slab.h>

struct seq_file;

/**
 * struct sbitmap_word - Word in a &struct sbitmap.
 */
struct sbitmap_word {
	/**
	 * @depth: Number of bits being used in @word/@cleared
	 */
	unsigned long depth;

	/**
	 * @word: word holding free bits
	 */
	unsigned long word ____cacheline_aligned_in_smp;

	/**
	 * @cleared: word holding cleared bits
	 */
	unsigned long cleared ____cacheline_aligned_in_smp;

	/**
	 * @swap_lock: Held while swapping word <-> cleared
	 */
	spinlock_t swap_lock;
} ____cacheline_aligned_in_smp;

/**
 * struct sbitmap - Scalable bitmap.
 *
 * A &struct sbitmap is spread over multiple cachelines to avoid ping-pong. This
 * trades off higher memory usage for better scalability.
 */
struct sbitmap {
	/**
	 * @depth: Number of bits used in the whole bitmap.
	 */
	unsigned int depth;

	/**
	 * @shift: log2(number of bits used per word)
	 */
	unsigned int shift;

	/**
	 * @map_nr: Number of words (cachelines) being used for the bitmap.
	 */
	unsigned int map_nr;

	/**
	 * @map: Allocated bitmap.
	 */
	struct sbitmap_word *map;
};

#define SBQ_WAIT_QUEUES 8
#define SBQ_WAKE_BATCH 8

/**
 * struct sbq_wait_state - Wait queue in a &struct sbitmap_queue.
 */
struct sbq_wait_state {
	/**
	 * @wait_cnt: Number of frees remaining before we wake up.
	 */
	atomic_t wait_cnt;

	/**
	 * @wait: Wait queue.
	 */
	wait_queue_head_t wait;
} ____cacheline_aligned_in_smp;

/**
 * struct sbitmap_queue - Scalable bitmap with the added ability to wait on free
 * bits.
 *
 * A &struct sbitmap_queue uses multiple wait queues and rolling wakeups to
 * avoid contention on the wait queue spinlock. This ensures that we don't hit a
 * scalability wall when we run out of free bits and have to start putting tasks
 * to sleep.
 */
struct sbitmap_queue {
	/**
	 * @sb: Scalable bitmap.
	 */
	struct sbitmap sb;

	/**
	 * @alloc_hint: Cache of last successfully allocated or freed bit.
	 *
	 * This is per-cpu, which allows multiple users to stick to different
	 * cachelines until the map is exhausted.
	 */
	unsigned int __percpu *alloc_hint;

	/**
	 * @wake_batch: Number of bits which must be freed before we wake up any
	 * waiters.
	 */
	unsigned int wake_batch;

	/**
	 * @wake_index: Next wait queue in @ws to wake up.
	 */
	atomic_t wake_index;

	/**
	 * @ws: Wait queues.
	 */
	struct sbq_wait_state *ws;

	/**
	 * @ws_active: count of currently active ws waitqueues
	 */
	atomic_t ws_active;

	/**
	 * @round_robin: Allocate bits in strict round-robin order.
	 */
	bool round_robin;

	/**
	 * @min_shallow_depth: The minimum shallow depth which may be passed to
	 * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow().
	 */
	unsigned int min_shallow_depth;
};

/**
 * sbitmap_init_node() - Initialize a &struct sbitmap on a specific memory node.
 * @sb: Bitmap to initialize.
 * @depth: Number of bits to allocate.
 * @shift: Use 2^@shift bits per word in the bitmap; if a negative number is
 *         given, a good default is chosen.
 * @flags: Allocation flags.
 * @node: Memory node to allocate on.
 *
 * Return: Zero on success or negative errno on failure.
 */
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node);

/**
 * sbitmap_free() - Free memory used by a &struct sbitmap.
 * @sb: Bitmap to free.
 */
static inline void sbitmap_free(struct sbitmap *sb)
{
	kfree(sb->map);
	sb->map = NULL;
}

/**
 * sbitmap_resize() - Resize a &struct sbitmap.
 * @sb: Bitmap to resize.
 * @depth: New number of bits to resize to.
 *
 * Doesn't reallocate anything. It's up to the caller to ensure that the new
 * depth doesn't exceed the depth that the bitmap was initialized with.
 */
void sbitmap_resize(struct sbitmap *sb, unsigned int depth);

/**
 * sbitmap_get() - Try to allocate a free bit from a &struct sbitmap.
 * @sb: Bitmap to allocate from.
 * @alloc_hint: Hint for where to start searching for a free bit.
 * @round_robin: If true, be stricter about allocation order; always allocate
 *               starting from the last allocated bit. This is less efficient
 *               than the default behavior (false).
 *
 * This operation provides acquire barrier semantics if it succeeds.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin);

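/*
 * Example: a minimal init/get/clear/free cycle (an illustrative sketch, not
 * part of this API; "tags" and "nr" are hypothetical locals, and the depth of
 * 128 is an arbitrary choice):
 *
 *	struct sbitmap tags;
 *	int nr;
 *
 *	if (sbitmap_init_node(&tags, 128, -1, GFP_KERNEL, NUMA_NO_NODE))
 *		return -ENOMEM;
 *
 *	nr = sbitmap_get(&tags, 0, false);
 *	if (nr >= 0) {
 *		... use bit "nr" ...
 *		sbitmap_clear_bit(&tags, nr);
 *	}
 *	sbitmap_free(&tags);
 */
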
/**
 * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap,
 * limiting the depth used from each word.
 * @sb: Bitmap to allocate from.
 * @alloc_hint: Hint for where to start searching for a free bit.
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 *
 * This rather specific operation allows for having multiple users with
 * different allocation limits. E.g., there can be a high-priority class that
 * uses sbitmap_get() and a low-priority class that uses sbitmap_get_shallow()
 * with a @shallow_depth of (1 << (@sb->shift - 1)). Then, the low-priority
 * class can only allocate half of the total bits in the bitmap, preventing it
 * from starving out the high-priority class.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
			unsigned long shallow_depth);

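/*
 * Example (illustrative sketch): a high-priority user allocates with the full
 * per-word depth while a low-priority user is limited to half of each word,
 * as described above; the alloc_hint of 0 is an arbitrary choice:
 *
 *	int hi = sbitmap_get(sb, 0, false);
 *	int lo = sbitmap_get_shallow(sb, 0, 1UL << (sb->shift - 1));
 */
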
/**
 * sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap.
 * @sb: Bitmap to check.
 *
 * Return: true if any bit in the bitmap is set, false otherwise.
 */
bool sbitmap_any_bit_set(const struct sbitmap *sb);

#define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift)
#define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U))

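/*
 * For example, with (sb)->shift == 6 (64 bits per word), bit number 130 maps
 * to word SB_NR_TO_INDEX() == 130 >> 6 == 2 and bit SB_NR_TO_BIT() ==
 * 130 & 63 == 2 within that word.
 */
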
typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *);

/**
 * __sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
 * @start: Where to start the iteration.
 * @sb: Bitmap to iterate over.
 * @fn: Callback. Should return true to continue or false to break early.
 * @data: Pointer to pass to callback.
 *
 * This is inline even though it's non-trivial so that the function calls to the
 * callback will hopefully get optimized away.
 */
static inline void __sbitmap_for_each_set(struct sbitmap *sb,
					  unsigned int start,
					  sb_for_each_fn fn, void *data)
{
	unsigned int index;
	unsigned int nr;
	unsigned int scanned = 0;

	if (start >= sb->depth)
		start = 0;
	index = SB_NR_TO_INDEX(sb, start);
	nr = SB_NR_TO_BIT(sb, start);

	while (scanned < sb->depth) {
		unsigned long word;
		unsigned int depth = min_t(unsigned int,
					   sb->map[index].depth - nr,
					   sb->depth - scanned);

		scanned += depth;
		word = sb->map[index].word & ~sb->map[index].cleared;
		if (!word)
			goto next;

		/*
		 * On the first iteration of the outer loop, we need to add the
		 * bit offset back to the size of the word for find_next_bit().
		 * On all other iterations, nr is zero, so this is a noop.
		 */
		depth += nr;
		while (1) {
			nr = find_next_bit(&word, depth, nr);
			if (nr >= depth)
				break;
			if (!fn(sb, (index << sb->shift) + nr, data))
				return;

			nr++;
		}
next:
		nr = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}
}

/**
 * sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
 * @sb: Bitmap to iterate over.
 * @fn: Callback. Should return true to continue or false to break early.
 * @data: Pointer to pass to callback.
 */
static inline void sbitmap_for_each_set(struct sbitmap *sb, sb_for_each_fn fn,
					void *data)
{
	__sbitmap_for_each_set(sb, 0, fn, data);
}

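/*
 * Example: counting the set bits with a callback (an illustrative sketch;
 * "count_fn" is a hypothetical function, not part of this API). Returning
 * false from the callback would stop the iteration early:
 *
 *	static bool count_fn(struct sbitmap *sb, unsigned int bitnr, void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *		return true;
 *	}
 *
 *	unsigned int count = 0;
 *
 *	sbitmap_for_each_set(sb, count_fn, &count);
 */
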
static inline unsigned long *__sbitmap_word(struct sbitmap *sb,
					    unsigned int bitnr)
{
	return &sb->map[SB_NR_TO_INDEX(sb, bitnr)].word;
}

/* Helpers equivalent to the operations in asm/bitops.h and linux/bitmap.h */

static inline void sbitmap_set_bit(struct sbitmap *sb, unsigned int bitnr)
{
	set_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr)
{
	clear_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

/*
 * This one is special, since it doesn't actually clear the bit, rather it
 * sets the corresponding bit in the ->cleared mask instead. Paired with
 * the caller doing sbitmap_deferred_clear() if a given index is full, which
 * will clear the previously freed entries in the corresponding ->word.
 */
static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr)
{
	unsigned long *addr = &sb->map[SB_NR_TO_INDEX(sb, bitnr)].cleared;

	set_bit(SB_NR_TO_BIT(sb, bitnr), addr);
}

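/*
 * Example (illustrative sketch): a completion path frees its bit without
 * dirtying ->word:
 *
 *	sbitmap_deferred_clear_bit(sb, nr);
 *
 * The freed bits only become allocatable again once an allocator finds the
 * word full and folds ->cleared back into ->word, so the cacheline traffic
 * on ->word is paid once per batch of frees rather than once per free.
 */
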
static inline void sbitmap_clear_bit_unlock(struct sbitmap *sb,
					    unsigned int bitnr)
{
	clear_bit_unlock(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
{
	return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

/**
 * sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file.
 * @sb: Bitmap to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The format may change at any time.
 */
void sbitmap_show(struct sbitmap *sb, struct seq_file *m);

/**
 * sbitmap_bitmap_show() - Write a hex dump of a &struct sbitmap to a &struct
 * seq_file.
 * @sb: Bitmap to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The output isn't guaranteed to be internally
 * consistent.
 */
void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m);

/**
 * sbitmap_queue_init_node() - Initialize a &struct sbitmap_queue on a specific
 * memory node.
 * @sbq: Bitmap queue to initialize.
 * @depth: See sbitmap_init_node().
 * @shift: See sbitmap_init_node().
 * @round_robin: See sbitmap_get().
 * @flags: Allocation flags.
 * @node: Memory node to allocate on.
 *
 * Return: Zero on success or negative errno on failure.
 */
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node);

/**
 * sbitmap_queue_free() - Free memory used by a &struct sbitmap_queue.
 * @sbq: Bitmap queue to free.
 */
static inline void sbitmap_queue_free(struct sbitmap_queue *sbq)
{
	kfree(sbq->ws);
	free_percpu(sbq->alloc_hint);
	sbitmap_free(&sbq->sb);
}

/**
 * sbitmap_queue_resize() - Resize a &struct sbitmap_queue.
 * @sbq: Bitmap queue to resize.
 * @depth: New number of bits to resize to.
 *
 * Like sbitmap_resize(), this doesn't reallocate anything. It has to do
 * some extra work on the &struct sbitmap_queue, so it's not safe to just
 * resize the underlying &struct sbitmap.
 */
void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth);

/**
 * __sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue with preemption already disabled.
 * @sbq: Bitmap queue to allocate from.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int __sbitmap_queue_get(struct sbitmap_queue *sbq);

/**
 * __sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
 * sbitmap_queue, limiting the depth used from each word, with preemption
 * already disabled.
 * @sbq: Bitmap queue to allocate from.
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 * See sbitmap_get_shallow().
 *
 * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
 * initializing @sbq.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
				unsigned int shallow_depth);

/**
 * sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to allocate from.
 * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
 *       sbitmap_queue_clear()).
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
static inline int sbitmap_queue_get(struct sbitmap_queue *sbq,
				    unsigned int *cpu)
{
	int nr;

	*cpu = get_cpu();
	nr = __sbitmap_queue_get(sbq);
	put_cpu();
	return nr;
}

/**
 * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
 * sbitmap_queue, limiting the depth used from each word.
 * @sbq: Bitmap queue to allocate from.
 * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
 *       sbitmap_queue_clear()).
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 * See sbitmap_get_shallow().
 *
 * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
 * initializing @sbq.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
static inline int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
					    unsigned int *cpu,
					    unsigned int shallow_depth)
{
	int nr;

	*cpu = get_cpu();
	nr = __sbitmap_queue_get_shallow(sbq, shallow_depth);
	put_cpu();
	return nr;
}

/**
 * sbitmap_queue_min_shallow_depth() - Inform a &struct sbitmap_queue of the
 * minimum shallow depth that will be used.
 * @sbq: Bitmap queue in question.
 * @min_shallow_depth: The minimum shallow depth that will be passed to
 * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow().
 *
 * sbitmap_queue_clear() batches wakeups as an optimization. The batch size
 * depends on the depth of the bitmap. Since the shallow allocation functions
 * effectively operate with a different depth, the shallow depth must be taken
 * into account when calculating the batch size. This function must be called
 * with the minimum shallow depth that will be used. Failure to do so can result
 * in missed wakeups.
 */
void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth);

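/*
 * Example (illustrative sketch; the depth of 8 is an arbitrary choice): tell
 * the queue up front about the smallest shallow depth that will ever be used,
 * then allocate with it:
 *
 *	sbitmap_queue_min_shallow_depth(sbq, 8);
 *	...
 *	nr = sbitmap_queue_get_shallow(sbq, &cpu, 8);
 */
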
/**
 * sbitmap_queue_clear() - Free an allocated bit and wake up waiters on a
 * &struct sbitmap_queue.
 * @sbq: Bitmap to free from.
 * @nr: Bit number to free.
 * @cpu: CPU the bit was allocated on.
 */
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu);

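/*
 * Example: a minimal allocate/use/free cycle on a queue (an illustrative
 * sketch; "cpu" and "nr" are hypothetical locals). The CPU returned by
 * sbitmap_queue_get() is passed back to sbitmap_queue_clear() so the per-cpu
 * allocation hint can be updated:
 *
 *	unsigned int cpu;
 *	int nr;
 *
 *	nr = sbitmap_queue_get(sbq, &cpu);
 *	if (nr >= 0) {
 *		... use bit "nr" ...
 *		sbitmap_queue_clear(sbq, nr, cpu);
 *	}
 */
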
static inline int sbq_index_inc(int index)
{
	return (index + 1) & (SBQ_WAIT_QUEUES - 1);
}

static inline void sbq_index_atomic_inc(atomic_t *index)
{
	int old = atomic_read(index);
	int new = sbq_index_inc(old);
	atomic_cmpxchg(index, old, new);
}

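/*
 * Note that the wrap-around uses a mask rather than a modulo, which is why
 * SBQ_WAIT_QUEUES must be a power of two: e.g. with SBQ_WAIT_QUEUES == 8,
 * sbq_index_inc(7) == (7 + 1) & 7 == 0.
 */
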
/**
 * sbq_wait_ptr() - Get the next wait queue to use for a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to wait on.
 * @wait_index: A counter per "user" of @sbq.
 */
static inline struct sbq_wait_state *sbq_wait_ptr(struct sbitmap_queue *sbq,
						  atomic_t *wait_index)
{
	struct sbq_wait_state *ws;

	ws = &sbq->ws[atomic_read(wait_index)];
	sbq_index_atomic_inc(wait_index);
	return ws;
}

/**
 * sbitmap_queue_wake_all() - Wake up everything waiting on a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to wake up.
 */
void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);

/**
 * sbitmap_queue_wake_up() - Wake up some of the waiters in one waitqueue
 * on a &struct sbitmap_queue.
 * @sbq: Bitmap queue to wake up.
 */
void sbitmap_queue_wake_up(struct sbitmap_queue *sbq);

/**
 * sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct
 * seq_file.
 * @sbq: Bitmap queue to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The format may change at any time.
 */
void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m);

struct sbq_wait {
	struct sbitmap_queue *sbq;	/* if set, sbq_wait is accounted */
	struct wait_queue_entry wait;
};

#define DEFINE_SBQ_WAIT(name)						\
	struct sbq_wait name = {					\
		.sbq = NULL,						\
		.wait = {						\
			.private	= current,			\
			.func		= autoremove_wake_function,	\
			.entry		= LIST_HEAD_INIT((name).wait.entry), \
		}							\
	}

/*
 * Wrapper around prepare_to_wait_exclusive(), which maintains some extra
 * internal state.
 */
void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state);

/*
 * Must be paired with sbitmap_prepare_to_wait().
 */
void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait);

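/*
 * Example: a typical wait-for-a-free-bit loop in the style of blk-mq tag
 * allocation (an illustrative sketch; "wait_index" is a hypothetical per-user
 * atomic counter, and the TASK_UNINTERRUPTIBLE/io_schedule() pairing is one
 * possible policy, not mandated by this API):
 *
 *	DEFINE_SBQ_WAIT(wait);
 *	struct sbq_wait_state *ws = sbq_wait_ptr(sbq, &wait_index);
 *	int nr;
 *
 *	do {
 *		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
 *		nr = __sbitmap_queue_get(sbq);
 *		if (nr >= 0)
 *			break;
 *		io_schedule();
 *		ws = sbq_wait_ptr(sbq, &wait_index);
 *	} while (1);
 *	sbitmap_finish_wait(sbq, ws, &wait);
 */
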
/*
 * Wrapper around add_wait_queue(), which maintains some extra internal state.
 */
void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait);

/*
 * Must be paired with sbitmap_add_wait_queue().
 */
void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait);

#endif /* __LINUX_SCALE_BITMAP_H */