/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_TAG_H
#define INT_BLK_MQ_TAG_H

#include "blk-mq.h"

/*
 * Tag address space map.
 */
struct blk_mq_tags {
	unsigned int nr_tags;
	unsigned int nr_reserved_tags;
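
	/*
	 * Number of currently active users of this shared tag set; read
	 * by hctx_may_queue() to compute each queue's fair share.
	 */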
	atomic_t active_queues;

	struct sbitmap_queue bitmap_tags;
	struct sbitmap_queue breserved_tags;
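
	/*
	 * rqs[] maps a tag to the request currently using it; static_rqs[]
	 * holds the requests preallocated for each tag, and page_list is
	 * the backing memory they were carved from.
	 */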
	struct request **rqs;
	struct request **static_rqs;
	struct list_head page_list;
};

extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
					unsigned int reserved_tags,
					int node, unsigned int flags);
extern void blk_mq_free_tags(struct blk_mq_tags *tags, unsigned int flags);
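
/*
 * Sketch of the expected pairing (illustrative values, not from this
 * file): a set of 256 tags with one reserved would do roughly
 *
 *	tags = blk_mq_init_tags(256, 1, NUMA_NO_NODE, flags);
 *	...
 *	blk_mq_free_tags(tags, flags);
 *
 * passing the same tag-set flags to both calls.
 */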

extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
extern void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
			   unsigned int tag);
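
/*
 * blk_mq_get_tag() may sleep unless BLK_MQ_REQ_NOWAIT is set in
 * data->flags, in which case it returns BLK_MQ_NO_TAG on exhaustion;
 * every tag handed out must eventually come back via blk_mq_put_tag().
 */
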
extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_tags **tags,
					unsigned int depth, bool can_grow);
extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
		void *priv);
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv);
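
/*
 * Note: blk_mq_queue_tag_busy_iter() walks only tags whose requests
 * are in flight on the given queue, while blk_mq_all_tag_iter() visits
 * every allocated tag in @tags regardless of state.
 */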

static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
						 struct blk_mq_hw_ctx *hctx)
{
	if (!hctx)
		return &bt->ws[0];
	return sbq_wait_ptr(bt, &hctx->wait_index);
}
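
/*
 * sbq_wait_ptr() advances hctx->wait_index round-robin across the
 * sbitmap_queue's wait queues, spreading waiters over multiple
 * cachelines; callers without a hardware context simply use the
 * first wait queue.
 */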

enum {
	BLK_MQ_NO_TAG		= -1U,
	BLK_MQ_TAG_MIN		= 1,
	BLK_MQ_TAG_MAX		= BLK_MQ_NO_TAG - 1,
};
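
/*
 * That is, BLK_MQ_NO_TAG is 0xffffffff and doubles as the "no tag
 * allocated" sentinel, so BLK_MQ_TAG_MAX (0xfffffffe) caps the tag
 * space and a valid tag can never alias the sentinel.
 */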

extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);

static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return false;

	return __blk_mq_tag_busy(hctx);
}

static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return;

	__blk_mq_tag_idle(hctx);
}
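
/*
 * Keeping the BLK_MQ_F_TAG_QUEUE_SHARED test inline means queues that
 * don't share tags pay only a predictable branch here; just shared
 * queues take the out-of-line atomic accounting paths.
 */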

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant: a depth of one can't be split
	 * fairly, so let everyone contend for the single tag.
	 */
	if (bt->sb.depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags: give each active user a ceiling-
	 * divided share of the depth, but never fewer than four tags.
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}
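
/*
 * Worked example: with bt->sb.depth == 256 and users == 3, depth =
 * max((256 + 2) / 3, 4U) = 86, so each active queue may have at most
 * 86 requests in flight before hctx_may_queue() starts failing; the
 * 4U floor keeps a queue usable even when users is very large.
 */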

static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags,
					  unsigned int tag)
{
	return tag < tags->nr_reserved_tags;
}
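
/*
 * Reserved tags occupy the low range [0, nr_reserved_tags): normal
 * allocations are offset by nr_reserved_tags when mapped into
 * bitmap_tags, so this comparison is all it takes.
 */

#endif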