mmc: Add MMC host software queue support
drivers/mmc/host/mmc_hsq.c
// SPDX-License-Identifier: GPL-2.0
/*
 * MMC software queue support based on command queue interfaces
 *
 * Copyright (C) 2019 Linaro, Inc.
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "mmc_hsq.h"

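/*
 * Each slot corresponds to one block layer tag. The slot count matches
 * the default queue depth used by the MMC block driver, and a tag equal
 * to the slot count serves as the "no next request" sentinel.
 */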
#define HSQ_NUM_SLOTS   64
#define HSQ_INVALID_TAG HSQ_NUM_SLOTS

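/*
 * Dispatch the request selected by hsq->next_tag to the host controller,
 * unless a request is already in flight or the queue is empty or
 * disabled. The controller's ->request() hook is invoked with hsq->lock
 * dropped.
 */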
static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
{
        struct mmc_host *mmc = hsq->mmc;
        struct hsq_slot *slot;
        unsigned long flags;

        spin_lock_irqsave(&hsq->lock, flags);

        /* Make sure we are not already running a request now */
        if (hsq->mrq) {
                spin_unlock_irqrestore(&hsq->lock, flags);
                return;
        }

        /* Make sure there are remaining requests that need to be pumped */
        if (!hsq->qcnt || !hsq->enabled) {
                spin_unlock_irqrestore(&hsq->lock, flags);
                return;
        }

        slot = &hsq->slot[hsq->next_tag];
        hsq->mrq = slot->mrq;
        hsq->qcnt--;

        spin_unlock_irqrestore(&hsq->lock, flags);

        mmc->ops->request(mmc, hsq->mrq);
}

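/* Choose the tag to dispatch next. Caller must hold hsq->lock. */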
static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
{
        struct hsq_slot *slot;
        int tag;

        /*
         * If there are no remaining requests in the software queue, then set
         * an invalid tag.
         */
        if (!remains) {
                hsq->next_tag = HSQ_INVALID_TAG;
                return;
        }

        /*
         * Increase the next tag and check if the corresponding request is
         * available; if yes, then we have found a candidate request.
         */
        if (++hsq->next_tag != HSQ_INVALID_TAG) {
                slot = &hsq->slot[hsq->next_tag];
                if (slot->mrq)
                        return;
        }

        /* Otherwise we should iterate over all slots to find an available tag. */
        for (tag = 0; tag < HSQ_NUM_SLOTS; tag++) {
                slot = &hsq->slot[tag];
                if (slot->mrq)
                        break;
        }

        if (tag == HSQ_NUM_SLOTS)
                tag = HSQ_INVALID_TAG;

        hsq->next_tag = tag;
}

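/*
 * Finish the bookkeeping for a completed request: clear the in-flight
 * request, pick the next tag, wake up any idle waiter, and pump the next
 * request unless recovery is in progress.
 */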
static void mmc_hsq_post_request(struct mmc_hsq *hsq)
{
        unsigned long flags;
        int remains;

        spin_lock_irqsave(&hsq->lock, flags);

        remains = hsq->qcnt;
        hsq->mrq = NULL;

        /* Update the next available tag to be queued. */
        mmc_hsq_update_next_tag(hsq, remains);

        if (hsq->waiting_for_idle && !remains) {
                hsq->waiting_for_idle = false;
                wake_up(&hsq->wait_queue);
        }

        /* Do not pump a new request in recovery mode. */
        if (hsq->recovery_halt) {
                spin_unlock_irqrestore(&hsq->lock, flags);
                return;
        }

        spin_unlock_irqrestore(&hsq->lock, flags);

        /*
         * Try to pump a new request to the host controller as fast as
         * possible, after completing the previous request.
         */
        if (remains > 0)
                mmc_hsq_pump_requests(hsq);
}

/**
 * mmc_hsq_finalize_request - finalize one request if the request is done
 * @mmc: the host controller
 * @mrq: the request that needs to be finalized
 *
 * This is expected to be called by the host driver once the controller
 * signals completion of @mrq.
 *
 * Return true if we finalized the corresponding request in the software
 * queue, otherwise return false.
 */
bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        unsigned long flags;

        spin_lock_irqsave(&hsq->lock, flags);

        if (!hsq->enabled || !hsq->mrq || hsq->mrq != mrq) {
                spin_unlock_irqrestore(&hsq->lock, flags);
                return false;
        }

        /*
         * Clear the slot of the completed request to make room for a new
         * request.
         */
        hsq->slot[hsq->next_tag].mrq = NULL;

        spin_unlock_irqrestore(&hsq->lock, flags);

        mmc_cqe_request_done(mmc, hsq->mrq);

        mmc_hsq_post_request(hsq);

        return true;
}
EXPORT_SYMBOL_GPL(mmc_hsq_finalize_request);

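/*
 * Error recovery: the core halts the queue before starting recovery, so
 * new requests are rejected with -EBUSY until recovery finishes and
 * pumping resumes.
 */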
static void mmc_hsq_recovery_start(struct mmc_host *mmc)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        unsigned long flags;

        spin_lock_irqsave(&hsq->lock, flags);

        hsq->recovery_halt = true;

        spin_unlock_irqrestore(&hsq->lock, flags);
}

static void mmc_hsq_recovery_finish(struct mmc_host *mmc)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        int remains;

        spin_lock_irq(&hsq->lock);

        hsq->recovery_halt = false;
        remains = hsq->qcnt;

        spin_unlock_irq(&hsq->lock);

        /*
         * Try to pump a new request if there are requests pending in the
         * software queue after finishing recovery.
         */
        if (remains > 0)
                mmc_hsq_pump_requests(hsq);
}

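/*
 * The ->cqe_request() entry point: store @mrq in the slot indexed by its
 * block layer tag and try to dispatch it immediately.
 */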
static int mmc_hsq_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        int tag = mrq->tag;

        spin_lock_irq(&hsq->lock);

        if (!hsq->enabled) {
                spin_unlock_irq(&hsq->lock);
                return -ESHUTDOWN;
        }

        /* Do not queue any new requests in recovery mode. */
        if (hsq->recovery_halt) {
                spin_unlock_irq(&hsq->lock);
                return -EBUSY;
        }

        hsq->slot[tag].mrq = mrq;

        /*
         * Set the next tag to the current request's tag if there is no
         * available next tag.
         */
        if (hsq->next_tag == HSQ_INVALID_TAG)
                hsq->next_tag = tag;

        hsq->qcnt++;

        spin_unlock_irq(&hsq->lock);

        mmc_hsq_pump_requests(hsq);

        return 0;
}

static void mmc_hsq_post_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
        if (mmc->ops->post_req)
                mmc->ops->post_req(mmc, mrq, 0);
}

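/*
 * Report whether the queue is idle (or halted for recovery); also arm
 * waiting_for_idle so the completion path wakes up the waiter.
 */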
static bool mmc_hsq_queue_is_idle(struct mmc_hsq *hsq, int *ret)
{
        bool is_idle;

        spin_lock_irq(&hsq->lock);

        is_idle = (!hsq->mrq && !hsq->qcnt) ||
                hsq->recovery_halt;

        *ret = hsq->recovery_halt ? -EBUSY : 0;
        hsq->waiting_for_idle = !is_idle;

        spin_unlock_irq(&hsq->lock);

        return is_idle;
}

static int mmc_hsq_wait_for_idle(struct mmc_host *mmc)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        int ret;

        wait_event(hsq->wait_queue,
                   mmc_hsq_queue_is_idle(hsq, &ret));

        return ret;
}

static void mmc_hsq_disable(struct mmc_host *mmc)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        u32 timeout = 500;
        int ret;

        spin_lock_irq(&hsq->lock);

        if (!hsq->enabled) {
                spin_unlock_irq(&hsq->lock);
                return;
        }

        spin_unlock_irq(&hsq->lock);

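        /* Wait up to 500ms for the queue to drain before disabling it. */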
        ret = wait_event_timeout(hsq->wait_queue,
                                 mmc_hsq_queue_is_idle(hsq, &ret),
                                 msecs_to_jiffies(timeout));
        if (ret == 0) {
                pr_warn("could not stop mmc software queue\n");
                return;
        }

        spin_lock_irq(&hsq->lock);

        hsq->enabled = false;

        spin_unlock_irq(&hsq->lock);
}

static int mmc_hsq_enable(struct mmc_host *mmc, struct mmc_card *card)
{
        struct mmc_hsq *hsq = mmc->cqe_private;

        spin_lock_irq(&hsq->lock);

        if (hsq->enabled) {
                spin_unlock_irq(&hsq->lock);
                return -EBUSY;
        }

        hsq->enabled = true;

        spin_unlock_irq(&hsq->lock);

        return 0;
}

static const struct mmc_cqe_ops mmc_hsq_ops = {
        .cqe_enable = mmc_hsq_enable,
        .cqe_disable = mmc_hsq_disable,
        .cqe_request = mmc_hsq_request,
        .cqe_post_req = mmc_hsq_post_req,
        .cqe_wait_for_idle = mmc_hsq_wait_for_idle,
        .cqe_recovery_start = mmc_hsq_recovery_start,
        .cqe_recovery_finish = mmc_hsq_recovery_finish,
};

int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc)
{
        hsq->num_slots = HSQ_NUM_SLOTS;
        hsq->next_tag = HSQ_INVALID_TAG;

        hsq->slot = devm_kcalloc(mmc_dev(mmc), hsq->num_slots,
                                 sizeof(struct hsq_slot), GFP_KERNEL);
        if (!hsq->slot)
                return -ENOMEM;

        hsq->mmc = mmc;
        hsq->mmc->cqe_private = hsq;
        mmc->cqe_ops = &mmc_hsq_ops;

        spin_lock_init(&hsq->lock);
        init_waitqueue_head(&hsq->wait_queue);

        return 0;
}
EXPORT_SYMBOL_GPL(mmc_hsq_init);

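/*
 * Usage sketch (not part of this file): a host driver allocates a
 * struct mmc_hsq in its probe routine and hands it to mmc_hsq_init()
 * before adding the host; variable names here are illustrative only.
 *
 *	hsq = devm_kzalloc(dev, sizeof(*hsq), GFP_KERNEL);
 *	if (!hsq)
 *		return -ENOMEM;
 *
 *	ret = mmc_hsq_init(hsq, mmc);
 *	if (ret)
 *		return ret;
 *
 * mmc_hsq_init() installs mmc_hsq_ops as the host's cqe_ops, so the
 * core will route requests through the software queue.
 */
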
void mmc_hsq_suspend(struct mmc_host *mmc)
{
        mmc_hsq_disable(mmc);
}
EXPORT_SYMBOL_GPL(mmc_hsq_suspend);

int mmc_hsq_resume(struct mmc_host *mmc)
{
        return mmc_hsq_enable(mmc, NULL);
}
EXPORT_SYMBOL_GPL(mmc_hsq_resume);
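
/*
 * PM wiring sketch (illustrative, not part of this file): a host driver
 * would call these from its system PM callbacks, e.g.:
 *
 *	static int my_host_suspend(struct device *dev)
 *	{
 *		struct mmc_host *mmc = dev_get_drvdata(dev);
 *
 *		mmc_hsq_suspend(mmc);
 *		return 0;
 *	}
 *
 * with a matching resume callback calling mmc_hsq_resume().
 */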