/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Fast and scalable bitmaps.
 *
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#ifndef __LINUX_SCALE_BITMAP_H
#define __LINUX_SCALE_BITMAP_H

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/minmax.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/wait.h>

struct seq_file;

/**
 * struct sbitmap_word - Word in a &struct sbitmap.
 */
struct sbitmap_word {
	/**
	 * @depth: Number of bits being used in @word/@cleared
	 */
	unsigned long depth;

	/**
	 * @word: word holding free bits
	 */
	unsigned long word ____cacheline_aligned_in_smp;

	/**
	 * @cleared: word holding cleared bits
	 */
	unsigned long cleared ____cacheline_aligned_in_smp;
} ____cacheline_aligned_in_smp;

/**
 * struct sbitmap - Scalable bitmap.
 *
 * A &struct sbitmap is spread over multiple cachelines to avoid ping-pong. This
 * trades off higher memory usage for better scalability.
 */
struct sbitmap {
	/**
	 * @depth: Number of bits used in the whole bitmap.
	 */
	unsigned int depth;

	/**
	 * @shift: log2(number of bits used per word)
	 */
	unsigned int shift;

	/**
	 * @map_nr: Number of words (cachelines) being used for the bitmap.
	 */
	unsigned int map_nr;

	/**
	 * @round_robin: Allocate bits in strict round-robin order.
	 */
	bool round_robin;

	/**
	 * @map: Allocated bitmap.
	 */
	struct sbitmap_word *map;

	/**
	 * @alloc_hint: Cache of last successfully allocated or freed bit.
	 *
	 * This is per-cpu, which allows multiple users to stick to different
	 * cachelines until the map is exhausted.
	 */
	unsigned int __percpu *alloc_hint;
};

#define SBQ_WAIT_QUEUES 8
#define SBQ_WAKE_BATCH 8

/**
 * struct sbq_wait_state - Wait queue in a &struct sbitmap_queue.
 */
struct sbq_wait_state {
	/**
	 * @wait_cnt: Number of frees remaining before we wake up.
	 */
	atomic_t wait_cnt;

	/**
	 * @wait: Wait queue.
	 */
	wait_queue_head_t wait;
} ____cacheline_aligned_in_smp;

/**
 * struct sbitmap_queue - Scalable bitmap with the added ability to wait on free
 * bits.
 *
 * A &struct sbitmap_queue uses multiple wait queues and rolling wakeups to
 * avoid contention on the wait queue spinlock. This ensures that we don't hit a
 * scalability wall when we run out of free bits and have to start putting tasks
 * to sleep.
 */
struct sbitmap_queue {
	/**
	 * @sb: Scalable bitmap.
	 */
	struct sbitmap sb;

	/**
	 * @wake_batch: Number of bits which must be freed before we wake up any
	 * waiters.
	 */
	unsigned int wake_batch;

	/**
	 * @wake_index: Next wait queue in @ws to wake up.
	 */
	atomic_t wake_index;

	/**
	 * @ws: Wait queues.
	 */
	struct sbq_wait_state *ws;

	/**
	 * @ws_active: count of currently active ws waitqueues
	 */
	atomic_t ws_active;

	/**
	 * @min_shallow_depth: The minimum shallow depth which may be passed to
	 * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow().
	 */
	unsigned int min_shallow_depth;
};

/**
 * sbitmap_init_node() - Initialize a &struct sbitmap on a specific memory node.
 * @sb: Bitmap to initialize.
 * @depth: Number of bits to allocate.
 * @shift: Use 2^@shift bits per word in the bitmap; if a negative number is
 *         given, a good default is chosen.
 * @flags: Allocation flags.
 * @node: Memory node to allocate on.
 * @round_robin: If true, be stricter about allocation order; always allocate
 *               starting from the last allocated bit. This is less efficient
 *               than the default behavior (false).
 * @alloc_hint: If true, use a per-cpu hint for where to start searching for
 *              a free bit.
 *
 * Return: Zero on success or negative errno on failure.
 */
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node, bool round_robin, bool alloc_hint);

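/*
 * Illustrative usage sketch (not part of the original header): initialize a
 * 128-bit map with a default word size, then release it. The depth and flags
 * here are arbitrary example values.
 *
 *	struct sbitmap sb;
 *	int ret;
 *
 *	ret = sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE,
 *				false, true);
 *	if (ret)
 *		return ret;
 *	...
 *	sbitmap_free(&sb);
 */
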
/**
 * sbitmap_free() - Free memory used by a &struct sbitmap.
 * @sb: Bitmap to free.
 */
static inline void sbitmap_free(struct sbitmap *sb)
{
	free_percpu(sb->alloc_hint);
	kfree(sb->map);
	sb->map = NULL;
}

/**
 * sbitmap_resize() - Resize a &struct sbitmap.
 * @sb: Bitmap to resize.
 * @depth: New number of bits to resize to.
 *
 * Doesn't reallocate anything. It's up to the caller to ensure that the new
 * depth doesn't exceed the depth that the bitmap was initialized with.
 */
void sbitmap_resize(struct sbitmap *sb, unsigned int depth);

/**
 * sbitmap_get() - Try to allocate a free bit from a &struct sbitmap.
 * @sb: Bitmap to allocate from.
 *
 * This operation provides acquire barrier semantics if it succeeds.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_get(struct sbitmap *sb);

/**
 * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap,
 * limiting the depth used from each word.
 * @sb: Bitmap to allocate from.
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 *
 * This rather specific operation allows for having multiple users with
 * different allocation limits. E.g., there can be a high-priority class that
 * uses sbitmap_get() and a low-priority class that uses sbitmap_get_shallow()
 * with a @shallow_depth of (1 << (@sb->shift - 1)). Then, the low-priority
 * class can only allocate half of the total bits in the bitmap, preventing it
 * from starving out the high-priority class.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth);

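/*
 * Illustrative sketch (not part of the original header) of the two-class
 * scheme described above; "alloc_tag" and "hi_prio" are hypothetical names
 * supplied by the caller.
 *
 *	static int alloc_tag(struct sbitmap *sb, bool hi_prio)
 *	{
 *		if (hi_prio)
 *			return sbitmap_get(sb);
 *		// Low priority may only use half of each word.
 *		return sbitmap_get_shallow(sb, 1UL << (sb->shift - 1));
 *	}
 */
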
/**
 * sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap.
 * @sb: Bitmap to check.
 *
 * Return: true if any bit in the bitmap is set, false otherwise.
 */
bool sbitmap_any_bit_set(const struct sbitmap *sb);

#define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift)
#define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U))

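/*
 * Worked example (added for illustration): with sb->shift == 6 (64 bits per
 * word), bit number 75 lives in word 75 >> 6 == 1, at bit 75 & 63 == 11.
 */
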
typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *);

/**
 * __sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
 * @start: Where to start the iteration.
 * @sb: Bitmap to iterate over.
 * @fn: Callback. Should return true to continue or false to break early.
 * @data: Pointer to pass to callback.
 *
 * This is inline even though it's non-trivial so that the function calls to the
 * callback will hopefully get optimized away.
 */
static inline void __sbitmap_for_each_set(struct sbitmap *sb,
					  unsigned int start,
					  sb_for_each_fn fn, void *data)
{
	unsigned int index;
	unsigned int nr;
	unsigned int scanned = 0;

	if (start >= sb->depth)
		start = 0;
	index = SB_NR_TO_INDEX(sb, start);
	nr = SB_NR_TO_BIT(sb, start);

	while (scanned < sb->depth) {
		unsigned long word;
		unsigned int depth = min_t(unsigned int,
					   sb->map[index].depth - nr,
					   sb->depth - scanned);

		scanned += depth;
		word = sb->map[index].word & ~sb->map[index].cleared;
		if (!word)
			goto next;

		/*
		 * On the first iteration of the outer loop, we need to add the
		 * bit offset back to the size of the word for find_next_bit().
		 * On all other iterations, nr is zero, so this is a noop.
		 */
		depth += nr;
		while (1) {
			nr = find_next_bit(&word, depth, nr);
			if (nr >= depth)
				break;
			if (!fn(sb, (index << sb->shift) + nr, data))
				return;

			nr++;
		}
next:
		nr = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}
}

/**
 * sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
 * @sb: Bitmap to iterate over.
 * @fn: Callback. Should return true to continue or false to break early.
 * @data: Pointer to pass to callback.
 */
static inline void sbitmap_for_each_set(struct sbitmap *sb, sb_for_each_fn fn,
					void *data)
{
	__sbitmap_for_each_set(sb, 0, fn, data);
}

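/*
 * Illustrative sketch (not part of the original header): count set bits with
 * the iterator. "count_fn" and the counter are hypothetical names.
 *
 *	static bool count_fn(struct sbitmap *sb, unsigned int bitnr, void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *		return true;	// keep iterating
 *	}
 *
 *	unsigned int count = 0;
 *
 *	sbitmap_for_each_set(sb, count_fn, &count);
 */
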
static inline unsigned long *__sbitmap_word(struct sbitmap *sb,
					    unsigned int bitnr)
{
	return &sb->map[SB_NR_TO_INDEX(sb, bitnr)].word;
}

/* Helpers equivalent to the operations in asm/bitops.h and linux/bitmap.h */

static inline void sbitmap_set_bit(struct sbitmap *sb, unsigned int bitnr)
{
	set_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr)
{
	clear_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

/*
 * This one is special, since it doesn't actually clear the bit, rather it
 * sets the corresponding bit in the ->cleared mask instead. Paired with
 * the caller doing sbitmap_deferred_clear() if a given index is full, which
 * will clear the previously freed entries in the corresponding ->word.
 */
static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr)
{
	unsigned long *addr = &sb->map[SB_NR_TO_INDEX(sb, bitnr)].cleared;

	set_bit(SB_NR_TO_BIT(sb, bitnr), addr);
}

/*
 * Pairs with sbitmap_get(); this one applies both the cleared bit and the
 * allocation hint.
 */
static inline void sbitmap_put(struct sbitmap *sb, unsigned int bitnr)
{
	sbitmap_deferred_clear_bit(sb, bitnr);

	if (likely(sb->alloc_hint && !sb->round_robin && bitnr < sb->depth))
		*raw_cpu_ptr(sb->alloc_hint) = bitnr;
}

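/*
 * Illustrative sketch (not part of the original header): a typical
 * allocate/free pairing on a plain &struct sbitmap.
 *
 *	int nr = sbitmap_get(sb);
 *
 *	if (nr < 0)
 *		return -EBUSY;	// no free bits right now
 *	...
 *	sbitmap_put(sb, nr);	// deferred clear + update of the alloc hint
 */
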
static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
{
	return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

static inline int sbitmap_calculate_shift(unsigned int depth)
{
	int shift = ilog2(BITS_PER_LONG);

	/*
	 * If the bitmap is small, shrink the number of bits per word so
	 * we spread over a few cachelines, at least. If less than 4
	 * bits, just forget about it, it's not going to work optimally
	 * anyway.
	 */
	if (depth >= 4) {
		while ((4U << shift) > depth)
			shift--;
	}

	return shift;
}

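/*
 * Worked example (added for illustration), assuming a 64-bit build where
 * ilog2(BITS_PER_LONG) == 6: for depth == 32, the loop shrinks the shift
 * until (4U << shift) <= 32, giving shift == 3, i.e. 8 bits per word spread
 * over four words rather than one 64-bit word.
 */
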
/**
 * sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file.
 * @sb: Bitmap to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The format may change at any time.
 */
void sbitmap_show(struct sbitmap *sb, struct seq_file *m);

/**
 * sbitmap_weight() - Return how many bits are set and not cleared in a
 * &struct sbitmap.
 * @sb: Bitmap to check.
 *
 * Return: The number of bits which are set and not cleared.
 */
unsigned int sbitmap_weight(const struct sbitmap *sb);

/**
 * sbitmap_bitmap_show() - Write a hex dump of a &struct sbitmap to a &struct
 * seq_file.
 * @sb: Bitmap to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The output isn't guaranteed to be internally
 * consistent.
 */
void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m);

/**
 * sbitmap_queue_init_node() - Initialize a &struct sbitmap_queue on a specific
 * memory node.
 * @sbq: Bitmap queue to initialize.
 * @depth: See sbitmap_init_node().
 * @shift: See sbitmap_init_node().
 * @round_robin: See sbitmap_init_node().
 * @flags: Allocation flags.
 * @node: Memory node to allocate on.
 *
 * Return: Zero on success or negative errno on failure.
 */
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node);

/**
 * sbitmap_queue_free() - Free memory used by a &struct sbitmap_queue.
 * @sbq: Bitmap queue to free.
 */
static inline void sbitmap_queue_free(struct sbitmap_queue *sbq)
{
	kfree(sbq->ws);
	sbitmap_free(&sbq->sb);
}

/**
 * sbitmap_queue_resize() - Resize a &struct sbitmap_queue.
 * @sbq: Bitmap queue to resize.
 * @depth: New number of bits to resize to.
 *
 * Like sbitmap_resize(), this doesn't reallocate anything. It has to do
 * some extra work on the &struct sbitmap_queue, so it's not safe to just
 * resize the underlying &struct sbitmap.
 */
void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth);

/**
 * __sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue with preemption already disabled.
 * @sbq: Bitmap queue to allocate from.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int __sbitmap_queue_get(struct sbitmap_queue *sbq);

/**
 * __sbitmap_queue_get_batch() - Try to allocate a batch of free bits
 * @sbq: Bitmap queue to allocate from.
 * @nr_tags: number of tags requested
 * @offset: offset to add to returned bits
 *
 * Return: Mask of allocated tags, 0 if none are found. Each tag allocated is
 * a bit in the mask returned, and the caller must add @offset to the value to
 * get the absolute tag value.
 */
unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
					unsigned int *offset);

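/*
 * Illustrative sketch (not part of the original header): decode the returned
 * mask into absolute tag values. "use_tag" is a hypothetical consumer.
 *
 *	unsigned int offset;
 *	unsigned long mask = __sbitmap_queue_get_batch(sbq, 4, &offset);
 *	unsigned int bit;
 *
 *	for_each_set_bit(bit, &mask, BITS_PER_LONG)
 *		use_tag(offset + bit);
 */
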
/**
 * __sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
 * sbitmap_queue, limiting the depth used from each word, with preemption
 * already disabled.
 * @sbq: Bitmap queue to allocate from.
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 * See sbitmap_get_shallow().
 *
 * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
 * initializing @sbq.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
				unsigned int shallow_depth);

/**
 * sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to allocate from.
 * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
 *       sbitmap_queue_clear()).
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
static inline int sbitmap_queue_get(struct sbitmap_queue *sbq,
				    unsigned int *cpu)
{
	int nr;

	*cpu = get_cpu();
	nr = __sbitmap_queue_get(sbq);
	put_cpu();
	return nr;
}

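/*
 * Illustrative sketch (not part of the original header): the usual
 * allocate/free pairing, remembering the CPU for sbitmap_queue_clear().
 *
 *	unsigned int cpu;
 *	int nr = sbitmap_queue_get(sbq, &cpu);
 *
 *	if (nr < 0)
 *		return -EBUSY;
 *	...
 *	sbitmap_queue_clear(sbq, nr, cpu);
 */
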
/**
 * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
 * sbitmap_queue, limiting the depth used from each word.
 * @sbq: Bitmap queue to allocate from.
 * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
 *       sbitmap_queue_clear()).
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 * See sbitmap_get_shallow().
 *
 * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
 * initializing @sbq.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
static inline int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
					    unsigned int *cpu,
					    unsigned int shallow_depth)
{
	int nr;

	*cpu = get_cpu();
	nr = __sbitmap_queue_get_shallow(sbq, shallow_depth);
	put_cpu();
	return nr;
}

/**
 * sbitmap_queue_min_shallow_depth() - Inform a &struct sbitmap_queue of the
 * minimum shallow depth that will be used.
 * @sbq: Bitmap queue in question.
 * @min_shallow_depth: The minimum shallow depth that will be passed to
 * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow().
 *
 * sbitmap_queue_clear() batches wakeups as an optimization. The batch size
 * depends on the depth of the bitmap. Since the shallow allocation functions
 * effectively operate with a different depth, the shallow depth must be taken
 * into account when calculating the batch size. This function must be called
 * with the minimum shallow depth that will be used. Failure to do so can result
 * in missed wakeups.
 */
void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth);

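/*
 * Illustrative sketch (not part of the original header): register the
 * smallest shallow depth right after initialization, before any shallow
 * allocations happen. The value 8 is an arbitrary example.
 *
 *	sbitmap_queue_init_node(sbq, 128, -1, false, GFP_KERNEL, NUMA_NO_NODE);
 *	sbitmap_queue_min_shallow_depth(sbq, 8);
 *	...
 *	nr = sbitmap_queue_get_shallow(sbq, &cpu, 8);
 */
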
/**
 * sbitmap_queue_clear() - Free an allocated bit and wake up waiters on a
 * &struct sbitmap_queue.
 * @sbq: Bitmap to free from.
 * @nr: Bit number to free.
 * @cpu: CPU the bit was allocated on.
 */
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu);

/**
 * sbitmap_queue_clear_batch() - Free a batch of allocated bits from a
 * &struct sbitmap_queue.
 * @sbq: Bitmap to free from.
 * @offset: offset for each tag in array
 * @tags: array of tags
 * @nr_tags: number of tags in array
 */
void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
			       int *tags, int nr_tags);

static inline int sbq_index_inc(int index)
{
	return (index + 1) & (SBQ_WAIT_QUEUES - 1);
}

static inline void sbq_index_atomic_inc(atomic_t *index)
{
	int old = atomic_read(index);
	int new = sbq_index_inc(old);

	atomic_cmpxchg(index, old, new);
}

/**
 * sbq_wait_ptr() - Get the next wait queue to use for a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to wait on.
 * @wait_index: A counter per "user" of @sbq.
 */
static inline struct sbq_wait_state *sbq_wait_ptr(struct sbitmap_queue *sbq,
						  atomic_t *wait_index)
{
	struct sbq_wait_state *ws;

	ws = &sbq->ws[atomic_read(wait_index)];
	sbq_index_atomic_inc(wait_index);
	return ws;
}

/**
 * sbitmap_queue_wake_all() - Wake up everything waiting on a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to wake up.
 */
void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);

/**
 * sbitmap_queue_wake_up() - Wake up some of the waiters in one waitqueue
 * on a &struct sbitmap_queue.
 * @sbq: Bitmap queue to wake up.
 */
void sbitmap_queue_wake_up(struct sbitmap_queue *sbq);

/**
 * sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct
 * seq_file.
 * @sbq: Bitmap queue to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The format may change at any time.
 */
void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m);

struct sbq_wait {
	struct sbitmap_queue *sbq;	/* if set, sbq_wait is accounted */
	struct wait_queue_entry wait;
};

#define DEFINE_SBQ_WAIT(name)						\
	struct sbq_wait name = {					\
		.sbq = NULL,						\
		.wait = {						\
			.private	= current,			\
			.func		= autoremove_wake_function,	\
			.entry		= LIST_HEAD_INIT((name).wait.entry), \
		}							\
	}

/*
 * Wrapper around prepare_to_wait_exclusive(), which maintains some extra
 * internal state.
 */
void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state);

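/*
 * Illustrative sketch (not part of the original header): a sleeping
 * allocation loop built from these wrappers, loosely following the pattern
 * used by callers such as blk-mq. "my_wait_index" is a hypothetical per-user
 * counter.
 *
 *	DEFINE_SBQ_WAIT(wait);
 *	struct sbq_wait_state *ws = sbq_wait_ptr(sbq, &my_wait_index);
 *	int nr;
 *
 *	do {
 *		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
 *		nr = __sbitmap_queue_get(sbq);
 *		if (nr >= 0)
 *			break;
 *		io_schedule();
 *	} while (1);
 *	sbitmap_finish_wait(sbq, ws, &wait);
 */
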
/*
 * Must be paired with sbitmap_prepare_to_wait().
 */
void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait);

/*
 * Wrapper around add_wait_queue(), which maintains some extra internal state.
 */
void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait);

/*
 * Must be paired with sbitmap_add_wait_queue().
 */
void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait);

#endif /* __LINUX_SCALE_BITMAP_H */