/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_TAG_H
#define INT_BLK_MQ_TAG_H

#include "blk-mq.h"
/*
 * Tag address space map.
 */
struct blk_mq_tags {
	unsigned int nr_tags;		/* total number of tags, reserved included */
	unsigned int nr_reserved_tags;	/* tags set aside for reserved allocations */

	atomic_t active_queues;		/* queues currently marked tag-active */

	/* maps actually in use; point at the embedded maps below or at a shared set */
	struct sbitmap_queue *bitmap_tags;
	struct sbitmap_queue *breserved_tags;

	struct sbitmap_queue __bitmap_tags;
	struct sbitmap_queue __breserved_tags;

	struct request **rqs;		/* in-flight request for each tag */
	struct request **static_rqs;	/* preallocated requests, indexed by tag */
	struct list_head page_list;	/* memory backing static_rqs */
};
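/*
 * A minimal sketch of the pointer indirection above (assumed from the
 * field layout, not copied from the implementation): for a tag set that
 * is not shared across queues, the in-use pointers simply refer to the
 * embedded maps:
 *
 *	tags->bitmap_tags = &tags->__bitmap_tags;
 *	tags->breserved_tags = &tags->__breserved_tags;
 *
 * A shared tag set points them at one common pair instead, so all
 * hardware queues allocate from the same bitmap.
 */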
extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
					unsigned int reserved_tags,
					int node, unsigned int flags);
extern void blk_mq_free_tags(struct blk_mq_tags *tags, unsigned int flags);
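/*
 * Illustrative pairing only (a sketch, not part of this header; the tag
 * counts and the use of set->flags here are assumptions):
 *
 *	struct blk_mq_tags *tags;
 *
 *	tags = blk_mq_init_tags(256, 1, NUMA_NO_NODE, set->flags);
 *	if (!tags)
 *		return -ENOMEM;
 *	...
 *	blk_mq_free_tags(tags, set->flags);
 */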
extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
extern void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
			   unsigned int tag);
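/*
 * Usage sketch (illustrative; assumes 'data' was set up by the caller
 * and that its hctx/ctx fields are valid):
 *
 *	unsigned int tag = blk_mq_get_tag(data);
 *
 *	if (tag == BLK_MQ_NO_TAG)
 *		return NULL;	// out of tags, or not allowed to wait
 *	...
 *	blk_mq_put_tag(data->hctx->tags, data->ctx, tag);
 */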
extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_tags **tags,
					unsigned int depth, bool can_grow);
extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags,
					bool include_reserve);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
		void *priv);
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv);
static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
						 struct blk_mq_hw_ctx *hctx)
{
	/*
	 * Round-robin across the per-bitmap wait queues to spread waiter
	 * contention; without an hctx, fall back to the first wait queue.
	 */
	if (!hctx)
		return &bt->ws[0];

	return sbq_wait_ptr(bt, &hctx->wait_index);
}
enum {
	BLK_MQ_NO_TAG		= -1U,
	BLK_MQ_TAG_MIN		= 1,
	BLK_MQ_TAG_MAX		= BLK_MQ_NO_TAG - 1,
};
extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);
/*
 * Mark the queue as an active user of the tag set while it has I/O in
 * flight; active_queues is what hctx_may_queue() below divides the
 * depth by.
 */
static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return false;

	return __blk_mq_tag_busy(hctx);
}
static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return;

	__blk_mq_tag_idle(hctx);
}
/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant: a depth of 1 cannot be shared fairly.
	 */
	if (bt->sb.depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags: give each user a ceil(depth / users)
	 * share, but never fewer than 4 tags.
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}
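/*
 * Worked example (illustrative numbers only): with bt->sb.depth == 256
 * and users == 3, depth = max((256 + 2) / 3, 4) = 86, so each active
 * queue may have at most 86 requests in flight.  With users == 128, the
 * integer division yields 2, which the 4U floor raises to 4.
 */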
static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags,
					  unsigned int tag)
{
	return tag < tags->nr_reserved_tags;
}

#endif