1 // SPDX-License-Identifier: GPL-2.0
4 * MMC software queue support based on command queue interfaces
6 * Copyright (C) 2019 Linaro, Inc.
7 * Author: Baolin Wang <baolin.wang@linaro.org>
10 #include <linux/mmc/card.h>
11 #include <linux/mmc/host.h>
/* Number of slots in the software queue: one pending request per tag. */
15 #define HSQ_NUM_SLOTS 64
/* Sentinel tag (one past the last valid tag) meaning "no slot selected". */
16 #define HSQ_INVALID_TAG HSQ_NUM_SLOTS
/*
 * Dispatch the next queued request to the host controller.
 *
 * Under hsq->lock, picks the slot at hsq->next_tag and bails out early if the
 * queue is empty or disabled (and, per the comment below, if a request is
 * already in flight).  The request itself is issued to the driver via
 * mmc->ops->request() only after dropping the lock.
 *
 * NOTE(review): this listing is elided (line numbers jump); the bodies of the
 * early-exit guards and the hsq->mrq assignment are not fully visible here.
 */
18 static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
20 struct mmc_host *mmc = hsq->mmc;
21 struct hsq_slot *slot;
24 spin_lock_irqsave(&hsq->lock, flags);
26 /* Make sure we are not already running a request now */
28 spin_unlock_irqrestore(&hsq->lock, flags);
32 /* Make sure there are remaining requests that need to be pumped */
33 if (!hsq->qcnt || !hsq->enabled) {
34 spin_unlock_irqrestore(&hsq->lock, flags);
/* Select the slot for the next tag chosen by mmc_hsq_update_next_tag(). */
38 slot = &hsq->slot[hsq->next_tag];
42 spin_unlock_irqrestore(&hsq->lock, flags);
/* Issue the in-flight request to the controller without holding the lock. */
44 mmc->ops->request(mmc, hsq->mrq);
/*
 * Pick the tag of the next request to be dispatched.
 *
 * @remains: number of requests still queued (presumably hsq->qcnt at the
 * call site in mmc_hsq_post_request() — TODO confirm).  When nothing
 * remains, next_tag is set to HSQ_INVALID_TAG.  Otherwise the adjacent
 * tag (next_tag + 1) is tried first as the cheap common case; failing
 * that, all slots are scanned for an occupied one.
 */
47 static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
49 struct hsq_slot *slot;
/* Nothing left in the software queue: park next_tag on the sentinel. */
53 * If there are no remaining requests in the software queue, then set an
57 hsq->next_tag = HSQ_INVALID_TAG;
/* Fast path: try the immediately following tag before scanning. */
62 * Increase the next tag and check whether the corresponding request is
63 * available; if yes, then we have found a candidate request.
65 if (++hsq->next_tag != HSQ_INVALID_TAG) {
66 slot = &hsq->slot[hsq->next_tag];
71 /* Otherwise we should iterate over all slots to find an available tag. */
72 for (tag = 0; tag < HSQ_NUM_SLOTS; tag++) {
73 slot = &hsq->slot[tag];
/* No occupied slot found after a full scan: queue has no next request. */
78 if (tag == HSQ_NUM_SLOTS)
79 tag = HSQ_INVALID_TAG;
/*
 * Completion-side bookkeeping after a request has been finalized.
 *
 * Under hsq->lock: advances next_tag via mmc_hsq_update_next_tag(), wakes any
 * task sleeping in mmc_hsq_wait_for_idle() once the queue drains, and then —
 * unless error recovery is in progress — pumps the next queued request.
 *
 * NOTE(review): elided listing; the assignments feeding `remains` and the
 * clearing of the in-flight request are not visible here.
 */
84 static void mmc_hsq_post_request(struct mmc_hsq *hsq)
89 spin_lock_irqsave(&hsq->lock, flags);
94 /* Update the next available tag to be queued. */
95 mmc_hsq_update_next_tag(hsq, remains);
/* Queue fully drained: release anyone blocked waiting for idle. */
97 if (hsq->waiting_for_idle && !remains) {
98 hsq->waiting_for_idle = false;
99 wake_up(&hsq->wait_queue);
102 /* Do not pump new request in recovery mode. */
103 if (hsq->recovery_halt) {
104 spin_unlock_irqrestore(&hsq->lock, flags);
108 spin_unlock_irqrestore(&hsq->lock, flags);
111 * Try to pump a new request to the host controller as fast as possible,
112 * after completing the previous request.
115 mmc_hsq_pump_requests(hsq);
119 * mmc_hsq_finalize_request - finalize one request if the request is done
120 * @mmc: the host controller
121 * @mrq: the request that needs to be finalized
122 *
123 * Return true if we finalized the corresponding request in the software
124 * queue, otherwise return false (queue disabled, nothing in flight, or
124 * @mrq is not the in-flight request).
126 bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq)
128 struct mmc_hsq *hsq = mmc->cqe_private;
131 spin_lock_irqsave(&hsq->lock, flags);
/* Only finalize the request we actually issued, and only while enabled. */
133 if (!hsq->enabled || !hsq->mrq || hsq->mrq != mrq) {
134 spin_unlock_irqrestore(&hsq->lock, flags);
139 * Clear the current completed slot request to make room for a new request.
141 hsq->slot[hsq->next_tag].mrq = NULL;
143 spin_unlock_irqrestore(&hsq->lock, flags);
/* Report completion to the core, then advance/pump the software queue. */
145 mmc_cqe_request_done(mmc, hsq->mrq);
147 mmc_hsq_post_request(hsq);
151 EXPORT_SYMBOL_GPL(mmc_hsq_finalize_request);
/*
 * Enter recovery mode: set recovery_halt under the lock so that neither
 * mmc_hsq_request() nor mmc_hsq_post_request() pumps new requests until
 * mmc_hsq_recovery_finish() clears the flag.
 */
153 static void mmc_hsq_recovery_start(struct mmc_host *mmc)
155 struct mmc_hsq *hsq = mmc->cqe_private;
158 spin_lock_irqsave(&hsq->lock, flags);
160 hsq->recovery_halt = true;
162 spin_unlock_irqrestore(&hsq->lock, flags);
/*
 * Leave recovery mode: clear recovery_halt under the lock, then restart
 * dispatching any requests that were queued while recovery was running.
 */
165 static void mmc_hsq_recovery_finish(struct mmc_host *mmc)
167 struct mmc_hsq *hsq = mmc->cqe_private;
170 spin_lock_irq(&hsq->lock);
172 hsq->recovery_halt = false;
175 spin_unlock_irq(&hsq->lock);
178 * Try to pump a new request if there are requests pending in the software
179 * queue after finishing recovery.
182 mmc_hsq_pump_requests(hsq);
/*
 * cqe_request hook: enqueue @mrq into a software-queue slot (indexed by its
 * tag) and kick the dispatch path.  Refuses new requests while recovery is
 * halted.
 *
 * NOTE(review): elided listing — the derivation of `tag`, the qcnt update,
 * the early-exit guard before line 193, and the return values are not
 * visible here.
 */
185 static int mmc_hsq_request(struct mmc_host *mmc, struct mmc_request *mrq)
187 struct mmc_hsq *hsq = mmc->cqe_private;
190 spin_lock_irq(&hsq->lock);
193 spin_unlock_irq(&hsq->lock);
197 /* Do not queue any new requests in recovery mode. */
198 if (hsq->recovery_halt) {
199 spin_unlock_irq(&hsq->lock);
/* Park the request in its slot until the pump selects it. */
203 hsq->slot[tag].mrq = mrq;
206 * Set the next tag to the current request's tag if no tag is available
/* Empty queue: this request becomes the immediate dispatch candidate. */
209 if (hsq->next_tag == HSQ_INVALID_TAG)
214 spin_unlock_irq(&hsq->lock);
216 mmc_hsq_pump_requests(hsq);
/*
 * cqe_post_req hook: forward post-processing to the host driver's optional
 * ->post_req() callback (err == 0, i.e. no error to report).
 */
221 static void mmc_hsq_post_req(struct mmc_host *mmc, struct mmc_request *mrq)
223 if (mmc->ops->post_req)
224 mmc->ops->post_req(mmc, mrq, 0);
/*
 * Check, under the lock, whether the software queue is idle: no in-flight
 * request and no queued requests (the || condition continues on a line
 * elided from this listing — presumably recovery_halt also counts as idle;
 * TODO confirm).  Reports -EBUSY through @ret when halted for recovery,
 * and records in waiting_for_idle whether the caller must keep sleeping.
 */
227 static bool mmc_hsq_queue_is_idle(struct mmc_hsq *hsq, int *ret)
231 spin_lock_irq(&hsq->lock);
233 is_idle = (!hsq->mrq && !hsq->qcnt) ||
236 *ret = hsq->recovery_halt ? -EBUSY : 0;
237 hsq->waiting_for_idle = !is_idle;
239 spin_unlock_irq(&hsq->lock);
/*
 * cqe_wait_for_idle hook: block (uninterruptibly, no timeout) until the
 * software queue reports idle; the status computed by
 * mmc_hsq_queue_is_idle() is returned through @ret.
 */
244 static int mmc_hsq_wait_for_idle(struct mmc_host *mmc)
246 struct mmc_hsq *hsq = mmc->cqe_private;
249 wait_event(hsq->wait_queue,
250 mmc_hsq_queue_is_idle(hsq, &ret));
/*
 * cqe_disable hook: wait (bounded by `timeout`, declared on an elided line)
 * for the software queue to drain, warn if it does not, then mark the queue
 * disabled under the lock.
 *
 * NOTE(review): elided listing — the early-exit guard before line 264 and
 * the timeout check before the pr_warn() are not visible here.
 */
255 static void mmc_hsq_disable(struct mmc_host *mmc)
257 struct mmc_hsq *hsq = mmc->cqe_private;
261 spin_lock_irq(&hsq->lock);
264 spin_unlock_irq(&hsq->lock);
268 spin_unlock_irq(&hsq->lock);
/* Sleep until idle or until the timeout elapses. */
270 ret = wait_event_timeout(hsq->wait_queue,
271 mmc_hsq_queue_is_idle(hsq, &ret),
272 msecs_to_jiffies(timeout));
274 pr_warn("could not stop mmc software queue\n");
/* Queue drained (or we gave up): flip the enabled flag under the lock. */
278 spin_lock_irq(&hsq->lock);
280 hsq->enabled = false;
282 spin_unlock_irq(&hsq->lock);
/*
 * cqe_enable hook: mark the software queue enabled under the lock.
 * @card is unused here (mmc_hsq_resume() passes NULL).
 *
 * NOTE(review): elided listing — the guard between lines 289/292 (likely
 * rejecting double-enable) and the return statements are not visible.
 */
285 static int mmc_hsq_enable(struct mmc_host *mmc, struct mmc_card *card)
287 struct mmc_hsq *hsq = mmc->cqe_private;
289 spin_lock_irq(&hsq->lock);
292 spin_unlock_irq(&hsq->lock);
298 spin_unlock_irq(&hsq->lock);
/*
 * Command-queue-engine operations implemented by the software queue; the
 * core calls these through mmc->cqe_ops (installed in mmc_hsq_init()).
 */
303 static const struct mmc_cqe_ops mmc_hsq_ops = {
304 .cqe_enable = mmc_hsq_enable,
305 .cqe_disable = mmc_hsq_disable,
306 .cqe_request = mmc_hsq_request,
307 .cqe_post_req = mmc_hsq_post_req,
308 .cqe_wait_for_idle = mmc_hsq_wait_for_idle,
309 .cqe_recovery_start = mmc_hsq_recovery_start,
310 .cqe_recovery_finish = mmc_hsq_recovery_finish,
/*
 * Initialize the software queue: allocate the slot array (device-managed,
 * freed automatically with the device), wire hsq into the host as its
 * cqe_private, install mmc_hsq_ops, and set up the lock and wait queue.
 *
 * NOTE(review): elided listing — the devm_kcalloc() failure check and the
 * return statements are not visible here.
 */
313 int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc)
315 hsq->num_slots = HSQ_NUM_SLOTS;
316 hsq->next_tag = HSQ_INVALID_TAG;
318 hsq->slot = devm_kcalloc(mmc_dev(mmc), hsq->num_slots,
319 sizeof(struct hsq_slot), GFP_KERNEL);
/* Cross-link host and queue, and expose the cqe ops to the core. */
324 hsq->mmc->cqe_private = hsq;
325 mmc->cqe_ops = &mmc_hsq_ops;
327 spin_lock_init(&hsq->lock);
328 init_waitqueue_head(&hsq->wait_queue);
332 EXPORT_SYMBOL_GPL(mmc_hsq_init);
/* Suspend helper for host drivers: drain and disable the software queue. */
334 void mmc_hsq_suspend(struct mmc_host *mmc)
336 mmc_hsq_disable(mmc);
338 EXPORT_SYMBOL_GPL(mmc_hsq_suspend);
/* Resume helper for host drivers: re-enable the software queue (no card). */
340 int mmc_hsq_resume(struct mmc_host *mmc)
342 return mmc_hsq_enable(mmc, NULL);
344 EXPORT_SYMBOL_GPL(mmc_hsq_resume);