// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"
#include "host.h"
static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
{
	/* Allow only 1 DCMD at a time */
	return mq->in_flight[MMC_ISSUE_DCMD];
}
void mmc_cqe_check_busy(struct mmc_queue *mq)
{
	if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
		mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;

	mq->cqe_busy &= ~MMC_CQE_QUEUE_FULL;
}
static inline bool mmc_cqe_can_dcmd(struct mmc_host *host)
{
	return host->caps2 & MMC_CAP2_CQE_DCMD;
}
static enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host,
					      struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		return MMC_ISSUE_SYNC;
	case REQ_OP_FLUSH:
		return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
	default:
		return MMC_ISSUE_ASYNC;
	}
}
enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
{
	struct mmc_host *host = mq->card->host;

	if (mq->use_cqe)
		return mmc_cqe_issue_type(host, req);

	if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
		return MMC_ISSUE_ASYNC;

	return MMC_ISSUE_SYNC;
}
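/*
 * Illustrative examples (not part of the original file): with CQE enabled
 * on a host that advertises MMC_CAP2_CQE_DCMD, the classification above
 * works out as:
 *
 *	mmc_issue_type(mq, flush_req);   -> MMC_ISSUE_DCMD
 *	mmc_issue_type(mq, read_req);    -> MMC_ISSUE_ASYNC
 *	mmc_issue_type(mq, discard_req); -> MMC_ISSUE_SYNC
 */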
static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
{
	if (!mq->recovery_needed) {
		mq->recovery_needed = true;
		schedule_work(&mq->recovery_work);
	}
}
void mmc_cqe_recovery_notifier(struct mmc_request *mrq)
{
	struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
						  brq.mrq);
	struct request *req = mmc_queue_req_to_req(mqrq);
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	unsigned long flags;

	spin_lock_irqsave(&mq->lock, flags);
	__mmc_cqe_recovery_notifier(mq);
	spin_unlock_irqrestore(&mq->lock, flags);
}
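/*
 * Usage sketch (assumption: the CQE host driver wires this up through
 * mrq->recovery_notifier, as cqhci-style drivers do), e.g. when preparing
 * a CQE request:
 *
 *	mrq->recovery_notifier = mmc_cqe_recovery_notifier;
 *
 * The host driver then invokes the notifier from its error path when the
 * hardware queue must be halted and recovered.
 */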
static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_request *mrq = &mqrq->brq.mrq;
	struct mmc_queue *mq = req->q->queuedata;
	struct mmc_host *host = mq->card->host;
	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
	bool recovery_needed = false;

	switch (issue_type) {
	case MMC_ISSUE_ASYNC:
	case MMC_ISSUE_DCMD:
		if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
			if (recovery_needed)
				__mmc_cqe_recovery_notifier(mq);
			return BLK_EH_RESET_TIMER;
		}
		/* The request did not really time out: it has already completed */
		blk_mq_complete_request(req);
		return BLK_EH_DONE;
	default:
		/* Timeout is handled by mmc core */
		return BLK_EH_RESET_TIMER;
	}
}
static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
						 bool reserved)
{
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mq->lock, flags);

	if (mq->recovery_needed || !mq->use_cqe)
		ret = BLK_EH_RESET_TIMER;
	else
		ret = mmc_cqe_timed_out(req);

	spin_unlock_irqrestore(&mq->lock, flags);

	return ret;
}
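/*
 * Note (based on the blk-mq timeout contract): .timeout fires once a
 * request has run longer than the 60 s set by blk_queue_rq_timeout() in
 * mmc_init_queue(). While recovery is pending, or when CQE is not in use,
 * the timer is simply re-armed; only CQE requests are probed via
 * host->cqe_ops->cqe_timeout() to see whether they really timed out.
 */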
static void mmc_mq_recovery_handler(struct work_struct *work)
{
	struct mmc_queue *mq = container_of(work, struct mmc_queue,
					    recovery_work);
	struct request_queue *q = mq->queue;

	mmc_get_card(mq->card, &mq->ctx);

	mq->in_recovery = true;

	if (mq->use_cqe)
		mmc_blk_cqe_recovery(mq);
	else
		mmc_blk_mq_recovery(mq);

	mq->in_recovery = false;

	spin_lock_irq(&mq->lock);
	mq->recovery_needed = false;
	spin_unlock_irq(&mq->lock);

	mmc_put_card(mq->card, &mq->ctx);

	blk_mq_run_hw_queues(q, true);
}
static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
	if (sg)
		sg_init_table(sg, sg_len);

	return sg;
}
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	q->limits.discard_granularity = card->pref_erase << 9;

	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
}
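/*
 * Worked example (illustrative numbers): pref_erase is in 512-byte
 * sectors, so a card with pref_erase == 1024 gets a discard granularity
 * of 1024 << 9 == 512 KiB, provided 1024 does not exceed max_discard.
 */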
/**
 * __mmc_init_request() - initialize the MMC-specific per-request data
 * @mq: the mmc queue
 * @req: the request
 * @gfp: memory allocation policy
 */
static int __mmc_init_request(struct mmc_queue *mq, struct request *req,
			      gfp_t gfp)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;

	mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
	if (!mq_rq->sg)
		return -ENOMEM;

	return 0;
}
static void mmc_exit_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);

	kfree(mq_rq->sg);
	mq_rq->sg = NULL;
}
static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
			       unsigned int hctx_idx, unsigned int numa_node)
{
	return __mmc_init_request(set->driver_data, req, GFP_KERNEL);
}
static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
				unsigned int hctx_idx)
{
	struct mmc_queue *mq = set->driver_data;

	mmc_exit_request(mq->queue, req);
}
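/*
 * Note (blk-mq semantics): init_request/exit_request run once per
 * pre-allocated tag when the tag set is created and destroyed, not once
 * per I/O, so every request permanently carries a scatterlist sized for
 * host->max_segs.
 */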
static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	enum mmc_issue_type issue_type;
	enum mmc_issued issued;
	bool get_card, cqe_retune_ok;
	blk_status_t ret;

	if (mmc_card_removed(mq->card)) {
		req->rq_flags |= RQF_QUIET;
		return BLK_STS_IOERR;
	}

	issue_type = mmc_issue_type(mq, req);

	spin_lock_irq(&mq->lock);

	if (mq->recovery_needed || mq->busy) {
		spin_unlock_irq(&mq->lock);
		return BLK_STS_RESOURCE;
	}

	switch (issue_type) {
	case MMC_ISSUE_DCMD:
		if (mmc_cqe_dcmd_busy(mq)) {
			mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
			spin_unlock_irq(&mq->lock);
			return BLK_STS_RESOURCE;
		}
		break;
	case MMC_ISSUE_ASYNC:
		break;
	default:
		/*
		 * Timeouts are handled by mmc core, and we don't have a host
		 * API to abort requests, so we can't handle the timeout anyway.
		 * However, when the timeout happens, blk_mq_complete_request()
		 * no longer works (to stop the request disappearing under us).
		 * To avoid racing with that, set a large timeout.
		 */
		req->timeout = 600 * HZ;
		break;
	}

	/* Parallel dispatch of requests is not supported at the moment */
	mq->busy = true;

	mq->in_flight[issue_type] += 1;
	get_card = (mmc_tot_in_flight(mq) == 1);
	cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);

	spin_unlock_irq(&mq->lock);

	if (!(req->rq_flags & RQF_DONTPREP)) {
		req_to_mmc_queue_req(req)->retries = 0;
		req->rq_flags |= RQF_DONTPREP;
	}

	if (get_card)
		mmc_get_card(card, &mq->ctx);

	if (mq->use_cqe) {
		host->retune_now = host->need_retune && cqe_retune_ok &&
				   !host->hold_retune;
	}

	blk_mq_start_request(req);

	issued = mmc_blk_mq_issue_rq(mq, req);

	switch (issued) {
	case MMC_REQ_BUSY:
		ret = BLK_STS_RESOURCE;
		break;
	case MMC_REQ_FAILED_TO_START:
		ret = BLK_STS_IOERR;
		break;
	default:
		ret = BLK_STS_OK;
		break;
	}

	if (issued != MMC_REQ_STARTED) {
		bool put_card = false;

		spin_lock_irq(&mq->lock);
		mq->in_flight[issue_type] -= 1;
		if (mmc_tot_in_flight(mq) == 0)
			put_card = true;
		mq->busy = false;
		spin_unlock_irq(&mq->lock);
		if (put_card)
			mmc_put_card(card, &mq->ctx);
	} else {
		WRITE_ONCE(mq->busy, false);
	}

	return ret;
}
static const struct blk_mq_ops mmc_mq_ops = {
	.queue_rq	= mmc_mq_queue_rq,
	.init_request	= mmc_mq_init_request,
	.exit_request	= mmc_mq_exit_request,
	.complete	= mmc_blk_mq_complete,
	.timeout	= mmc_mq_timed_out,
};
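/*
 * Callback flow sketch (illustrative, simplified): for a typical I/O the
 * ops above are exercised roughly as follows:
 *
 *	mmc_mq_queue_rq()              // .queue_rq: dispatch
 *	  -> mmc_blk_mq_issue_rq()     // in block.c
 *	...
 *	blk_mq_complete_request(req)   // from the mmc core
 *	  -> mmc_blk_mq_complete()     // .complete
 */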
static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	unsigned block_size = 512;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

	blk_queue_bounce_limit(mq->queue, limit);
	blk_queue_max_hw_sectors(mq->queue,
		min(host->max_blk_count, host->max_req_size / 512));
	blk_queue_max_segments(mq->queue, host->max_segs);

	if (mmc_card_mmc(card))
		block_size = card->ext_csd.data_sector_size;

	blk_queue_logical_block_size(mq->queue, block_size);
	blk_queue_max_segment_size(mq->queue,
			round_down(host->max_seg_size, block_size));

	dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));

	INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
	INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);

	mutex_init(&mq->complete_lock);

	init_waitqueue_head(&mq->wait);
}
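/*
 * Worked example (illustrative numbers): a host with max_blk_count == 65535
 * and max_req_size == 524288 ends up with min(65535, 524288 / 512) == 1024
 * max hw sectors, i.e. 512 KiB per request.
 */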
/* Set queue depth to get a reasonable value for q->nr_requests */
#define MMC_QUEUE_DEPTH 64
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int ret;

	mq->card = card;
	mq->use_cqe = host->cqe_enabled;

	spin_lock_init(&mq->lock);

	memset(&mq->tag_set, 0, sizeof(mq->tag_set));
	mq->tag_set.ops = &mmc_mq_ops;
	/*
	 * The queue depth for CQE must match the hardware because the request
	 * tag is used to index the hardware queue.
	 */
	if (mq->use_cqe)
		mq->tag_set.queue_depth =
			min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
	else
		mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
	mq->tag_set.numa_node = NUMA_NO_NODE;
	mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
	mq->tag_set.nr_hw_queues = 1;
	mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
	mq->tag_set.driver_data = mq;

	ret = blk_mq_alloc_tag_set(&mq->tag_set);
	if (ret)
		return ret;

	mq->queue = blk_mq_init_queue(&mq->tag_set);
	if (IS_ERR(mq->queue)) {
		ret = PTR_ERR(mq->queue);
		goto free_tag_set;
	}

	mq->queue->queuedata = mq;
	blk_queue_rq_timeout(mq->queue, 60 * HZ);

	mmc_setup_queue(mq, card);
	return 0;

free_tag_set:
	blk_mq_free_tag_set(&mq->tag_set);
	return ret;
}
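/*
 * Usage sketch (assumption: mirrors how the mmc block driver creates a
 * queue for a new disk; the surrounding names are illustrative):
 *
 *	ret = mmc_init_queue(&md->queue, card);
 *	if (ret)
 *		goto err_free;
 *	md->queue.blkdata = md;
 *	...
 *	mmc_cleanup_queue(&md->queue);	// on teardown
 */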
void mmc_queue_suspend(struct mmc_queue *mq)
{
	blk_mq_quiesce_queue(mq->queue);

	/*
	 * The host remains claimed while there are outstanding requests, so
	 * simply claiming and releasing here ensures there are none.
	 */
	mmc_claim_host(mq->card->host);
	mmc_release_host(mq->card->host);
}
void mmc_queue_resume(struct mmc_queue *mq)
{
	blk_mq_unquiesce_queue(mq->queue);
}
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;

	/*
	 * The legacy code handled the possibility of being suspended,
	 * so do that here too.
	 */
	if (blk_queue_quiesced(q))
		blk_mq_unquiesce_queue(q);

	blk_cleanup_queue(q);
	blk_mq_free_tag_set(&mq->tag_set);

	/*
	 * A request can be completed before the next request, potentially
	 * leaving a complete_work with nothing to do. Such a work item might
	 * still be queued at this point. Flush it.
	 */
	flush_work(&mq->complete_work);

	mq->card = NULL;
}
/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	struct request *req = mmc_queue_req_to_req(mqrq);

	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
}
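/*
 * Usage sketch (illustrative): callers in block.c map a request onto the
 * pre-allocated scatterlist before programming the host controller:
 *
 *	brq->data.sg = mqrq->sg;
 *	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
 */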