// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/platform_device.h>
#include <linux/ktime.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

#include "cqhci.h"
#include "cqhci-crypto.h"

#define DCMD_SLOT 31
#define NUM_SLOTS 32

struct cqhci_slot {
	struct mmc_request *mrq;
	unsigned int flags;
#define CQHCI_EXTERNAL_TIMEOUT	BIT(0)
#define CQHCI_COMPLETED		BIT(1)
#define CQHCI_HOST_CRC		BIT(2)
#define CQHCI_HOST_TIMEOUT	BIT(3)
#define CQHCI_HOST_OTHER	BIT(4)
};

static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->desc_base + (tag * cq_host->slot_sz);
}

static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
{
	u8 *desc = get_desc(cq_host, tag);

	return desc + cq_host->task_desc_len;
}

static inline size_t get_trans_desc_offset(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->trans_desc_len * cq_host->mmc->max_segs * tag;
}

static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
{
	size_t offset = get_trans_desc_offset(cq_host, tag);

	return cq_host->trans_desc_dma_base + offset;
}

static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
	size_t offset = get_trans_desc_offset(cq_host, tag);

	return cq_host->trans_desc_base + offset;
}
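
/*
 * Worked example of the offset math above, assuming a typical 64-bit host
 * with trans_desc_len = 16 and mmc->max_segs = 128: the transfer-descriptor
 * list for tag 2 starts at byte 16 * 128 * 2 = 4096 of trans_desc_base.
 */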

static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
	u8 *link_temp;
	dma_addr_t trans_temp;

	link_temp = get_link_desc(cq_host, tag);
	trans_temp = get_trans_desc_dma(cq_host, tag);

	memset(link_temp, 0, cq_host->link_desc_len);
	if (cq_host->link_desc_len > 8)
		*(link_temp + 8) = 0;

	if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) {
		*link_temp = CQHCI_VALID(0) | CQHCI_ACT(0) | CQHCI_END(1);
		return;
	}

	*link_temp = CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0);

	if (cq_host->dma64) {
		__le64 *data_addr = (__le64 __force *)(link_temp + 4);

		data_addr[0] = cpu_to_le64(trans_temp);
	} else {
		__le32 *data_addr = (__le32 __force *)(link_temp + 4);

		data_addr[0] = cpu_to_le32(trans_temp);
	}
}
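
/*
 * Worked example of the link-descriptor encoding above (bit positions per
 * the CQHCI_* attribute macros in cqhci.h): a regular task slot gets the
 * attribute byte CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0) = 0x61,
 * i.e. a valid "link" descriptor pointing at the transfer-descriptor list,
 * while the DCMD slot gets CQHCI_VALID(0) | CQHCI_ACT(0) | CQHCI_END(1) =
 * 0x02, an end-of-chain marker with no transfer data.
 */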

static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
{
	cqhci_writel(cq_host, set, CQHCI_ISTE);
	cqhci_writel(cq_host, set, CQHCI_ISGE);
}

#define DRV_NAME "cqhci"

#define CQHCI_DUMP(f, x...) \
	pr_err("%s: " DRV_NAME ": " f, mmc_hostname(mmc), ## x)

static void cqhci_dumpregs(struct cqhci_host *cq_host)
{
	struct mmc_host *mmc = cq_host->mmc;

	CQHCI_DUMP("============ CQHCI REGISTER DUMP ===========\n");

	CQHCI_DUMP("Caps:      0x%08x | Version:  0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CAP),
		   cqhci_readl(cq_host, CQHCI_VER));
	CQHCI_DUMP("Config:    0x%08x | Control:  0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CFG),
		   cqhci_readl(cq_host, CQHCI_CTL));
	CQHCI_DUMP("Int stat:  0x%08x | Int enab: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_IS),
		   cqhci_readl(cq_host, CQHCI_ISTE));
	CQHCI_DUMP("Int sig:   0x%08x | Int Coal: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_ISGE),
		   cqhci_readl(cq_host, CQHCI_IC));
	CQHCI_DUMP("TDL base:  0x%08x | TDL up32: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TDLBA),
		   cqhci_readl(cq_host, CQHCI_TDLBAU));
	CQHCI_DUMP("Doorbell:  0x%08x | TCN:      0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TDBR),
		   cqhci_readl(cq_host, CQHCI_TCN));
	CQHCI_DUMP("Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_DQS),
		   cqhci_readl(cq_host, CQHCI_DPT));
	CQHCI_DUMP("Task clr:  0x%08x | SSC1:     0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TCLR),
		   cqhci_readl(cq_host, CQHCI_SSC1));
	CQHCI_DUMP("SSC2:      0x%08x | DCMD rsp: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_SSC2),
		   cqhci_readl(cq_host, CQHCI_CRDCT));
	CQHCI_DUMP("RED mask:  0x%08x | TERRI:    0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_RMEM),
		   cqhci_readl(cq_host, CQHCI_TERRI));
	CQHCI_DUMP("Resp idx:  0x%08x | Resp arg: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CRI),
		   cqhci_readl(cq_host, CQHCI_CRA));

	if (cq_host->ops->dumpregs)
		cq_host->ops->dumpregs(mmc);
	else
		CQHCI_DUMP(": ===========================================\n");
}

/*
 * The allocated descriptor table for task, link & transfer descriptors
 * looks like:
 * |----------|
 * |task desc |  |->|----------|
 * |----------|  |  |trans desc|
 * |link desc-|->|  |----------|
 * |----------|          .
 *      .                .
 *  no. of slots      max-segs
 *      .           |----------|
 * |----------|
 * The idea here is to create the [task+trans] table and mark & point the
 * link desc to the transfer desc table on a per slot basis.
 */
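
/*
 * For example, with 128-bit task descriptors (task_desc_len = 16) and
 * 16-byte link descriptors (link_desc_len = 16), slot_sz is 32 bytes and
 * the 32-slot descriptor table occupies 32 * 32 = 1024 bytes; the
 * transfer-descriptor pool then adds trans_desc_len * max_segs bytes for
 * each slot of the queue depth.
 */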
static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
{
	int i = 0;

	/* task descriptor can be 64/128 bit irrespective of arch */
	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
		cqhci_writel(cq_host, cqhci_readl(cq_host, CQHCI_CFG) |
			     CQHCI_TASK_DESC_SZ, CQHCI_CFG);
		cq_host->task_desc_len = 16;
	} else {
		cq_host->task_desc_len = 8;
	}

	/*
	 * 96 bits length of transfer desc instead of 128 bits which means
	 * ADMA would expect next valid descriptor at the 96th bit
	 * or 12th byte address.
	 */
	if (cq_host->dma64) {
		if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ)
			cq_host->trans_desc_len = 12;
		else
			cq_host->trans_desc_len = 16;
		cq_host->link_desc_len = 16;
	} else {
		cq_host->trans_desc_len = 8;
		cq_host->link_desc_len = 8;
	}

	/* total size of a slot: 1 task & 1 transfer (link) */
	cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;

	cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;

	cq_host->data_size = get_trans_desc_offset(cq_host, cq_host->mmc->cqe_qdepth);

	pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
		 mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
		 cq_host->slot_sz);

	/*
	 * allocate a dma-mapped chunk of memory for the descriptors
	 * allocate a dma-mapped chunk of memory for link descriptors
	 * setup each link-desc memory offset per slot-number to
	 * the descriptor table.
	 */
	cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
						 cq_host->desc_size,
						 &cq_host->desc_dma_base,
						 GFP_KERNEL);
	if (!cq_host->desc_base)
		return -ENOMEM;

	cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
						       cq_host->data_size,
						       &cq_host->trans_desc_dma_base,
						       GFP_KERNEL);
	if (!cq_host->trans_desc_base) {
		dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
				   cq_host->desc_base,
				   cq_host->desc_dma_base);
		cq_host->desc_base = NULL;
		cq_host->desc_dma_base = 0;
		return -ENOMEM;
	}

	pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
		 mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
		 (unsigned long long)cq_host->desc_dma_base,
		 (unsigned long long)cq_host->trans_desc_dma_base);

	for (; i < cq_host->num_slots; i++)
		setup_trans_desc(cq_host, i);

	return 0;
}

static void __cqhci_enable(struct cqhci_host *cq_host)
{
	struct mmc_host *mmc = cq_host->mmc;
	u32 cqcfg;

	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);

	/* Configuration must not be changed while enabled */
	if (cqcfg & CQHCI_ENABLE) {
		cqcfg &= ~CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
	}

	cqcfg &= ~(CQHCI_DCMD | CQHCI_TASK_DESC_SZ);

	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
		cqcfg |= CQHCI_DCMD;

	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128)
		cqcfg |= CQHCI_TASK_DESC_SZ;

	if (mmc->caps2 & MMC_CAP2_CRYPTO)
		cqcfg |= CQHCI_CRYPTO_GENERAL_ENABLE;

	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base),
		     CQHCI_TDLBA);
	cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base),
		     CQHCI_TDLBAU);

	cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2);

	cqhci_set_irqs(cq_host, 0);

	cqcfg |= CQHCI_ENABLE;

	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	mmc->cqe_on = true;

	if (cq_host->ops->enable)
		cq_host->ops->enable(mmc);

	/* Ensure all writes are done before interrupts are enabled */
	wmb();

	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

	cq_host->activated = true;
}

static void __cqhci_disable(struct cqhci_host *cq_host)
{
	u32 cqcfg;

	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
	cqcfg &= ~CQHCI_ENABLE;
	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	cq_host->mmc->cqe_on = false;

	cq_host->activated = false;
}

int cqhci_deactivate(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (cq_host->enabled && cq_host->activated)
		__cqhci_disable(cq_host);

	return 0;
}
EXPORT_SYMBOL(cqhci_deactivate);

int cqhci_resume(struct mmc_host *mmc)
{
	/* Re-enable is done upon first request */
	return 0;
}
EXPORT_SYMBOL(cqhci_resume);

static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int err;

	if (!card->ext_csd.cmdq_en)
		return -EINVAL;

	if (cq_host->enabled)
		return 0;

	cq_host->rca = card->rca;

	err = cqhci_host_alloc_tdl(cq_host);
	if (err) {
		pr_err("%s: Failed to enable CQE, error %d\n",
		       mmc_hostname(mmc), err);
		return err;
	}

	__cqhci_enable(cq_host);

	cq_host->enabled = true;

#ifdef DEBUG
	cqhci_dumpregs(cq_host);
#endif
	return 0;
}

/* CQHCI is idle and should halt immediately, so set a small timeout */
#define CQHCI_OFF_TIMEOUT 100

static u32 cqhci_read_ctl(struct cqhci_host *cq_host)
{
	return cqhci_readl(cq_host, CQHCI_CTL);
}

static void cqhci_off(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	u32 reg;
	int err;

	if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
		return;

	if (cq_host->ops->disable)
		cq_host->ops->disable(mmc, false);

	cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);

	err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg,
				 reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT);
	if (err < 0)
		pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
	else
		pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));

	if (cq_host->ops->post_disable)
		cq_host->ops->post_disable(mmc);

	mmc->cqe_on = false;
}

static void cqhci_disable(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (!cq_host->enabled)
		return;

	cqhci_off(mmc);

	__cqhci_disable(cq_host);

	dmam_free_coherent(mmc_dev(mmc), cq_host->data_size,
			   cq_host->trans_desc_base,
			   cq_host->trans_desc_dma_base);

	dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size,
			   cq_host->desc_base,
			   cq_host->desc_dma_base);

	cq_host->trans_desc_base = NULL;
	cq_host->desc_base = NULL;

	cq_host->enabled = false;
}

static void cqhci_prep_task_desc(struct mmc_request *mrq,
				 struct cqhci_host *cq_host, int tag)
{
	__le64 *task_desc = (__le64 __force *)get_desc(cq_host, tag);
	u32 req_flags = mrq->data->flags;
	u64 desc0;

	desc0 = CQHCI_VALID(1) |
		CQHCI_END(1) |
		CQHCI_INT(1) |
		CQHCI_ACT(0x5) |
		CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) |
		CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) |
		CQHCI_DATA_DIR(!!(req_flags & MMC_DATA_READ)) |
		CQHCI_PRIORITY(!!(req_flags & MMC_DATA_PRIO)) |
		CQHCI_QBAR(!!(req_flags & MMC_DATA_QBR)) |
		CQHCI_REL_WRITE(!!(req_flags & MMC_DATA_REL_WR)) |
		CQHCI_BLK_COUNT(mrq->data->blocks) |
		CQHCI_BLK_ADDR((u64)mrq->data->blk_addr);

	task_desc[0] = cpu_to_le64(desc0);

	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
		u64 desc1 = cqhci_crypto_prep_task_desc(mrq);

		task_desc[1] = cpu_to_le64(desc1);

		pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx%016llx\n",
			 mmc_hostname(mrq->host), mrq->tag, desc1, desc0);
	} else {
		pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx\n",
			 mmc_hostname(mrq->host), mrq->tag, desc0);
	}
}
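
/*
 * For illustration, assuming the standard CQHCI task-descriptor bit layout
 * from cqhci.h: a single-block read at block address 0 encodes as
 * CQHCI_VALID(1) | CQHCI_END(1) | CQHCI_INT(1) | CQHCI_ACT(0x5) |
 * CQHCI_DATA_DIR(1) | CQHCI_BLK_COUNT(1), i.e. desc0 = 0x11057.
 */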

static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq)
{
	int sg_count;
	struct mmc_data *data = mrq->data;

	if (!data)
		return -EINVAL;

	sg_count = dma_map_sg(mmc_dev(host), data->sg,
			      data->sg_len,
			      (data->flags & MMC_DATA_WRITE) ?
			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!sg_count) {
		pr_err("%s: sg-len: %d\n", __func__, data->sg_len);
		return -ENOMEM;
	}

	return sg_count;
}

static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
				bool dma64)
{
	__le32 *attr = (__le32 __force *)desc;

	*attr = (CQHCI_VALID(1) |
		 CQHCI_END(end ? 1 : 0) |
		 CQHCI_INT(0) |
		 CQHCI_ACT(0x4) |
		 CQHCI_DAT_LENGTH(len));

	if (dma64) {
		__le64 *dataddr = (__le64 __force *)(desc + 4);

		dataddr[0] = cpu_to_le64(addr);
	} else {
		__le32 *dataddr = (__le32 __force *)(desc + 4);

		dataddr[0] = cpu_to_le32(addr);
	}
}

static int cqhci_prep_tran_desc(struct mmc_request *mrq,
				struct cqhci_host *cq_host, int tag)
{
	struct mmc_data *data = mrq->data;
	int i, sg_count, len;
	bool end = false;
	bool dma64 = cq_host->dma64;
	dma_addr_t addr;
	u8 *desc;
	struct scatterlist *sg;

	sg_count = cqhci_dma_map(mrq->host, mrq);
	if (sg_count < 0) {
		pr_err("%s: %s: unable to map sg lists, %d\n",
		       mmc_hostname(mrq->host), __func__, sg_count);
		return sg_count;
	}

	desc = get_trans_desc(cq_host, tag);

	for_each_sg(data->sg, sg, sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((i+1) == sg_count)
			end = true;
		cqhci_set_tran_desc(desc, addr, len, end, dma64);
		desc += cq_host->trans_desc_len;
	}

	return 0;
}
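
/*
 * Example: a request whose data maps to three DMA segments produces three
 * consecutive transfer descriptors in the tag's list; only the third is
 * written with end = true, so the hardware stops walking the chain there.
 */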

static void cqhci_prep_dcmd_desc(struct mmc_host *mmc,
				 struct mmc_request *mrq)
{
	u64 *task_desc = NULL;
	u64 data = 0;
	u8 resp_type;
	u8 *desc;
	__le64 *dataddr;
	struct cqhci_host *cq_host = mmc->cqe_private;
	u8 timing;

	if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) {
		resp_type = 0x0;
		timing = 0x1;
	} else {
		if (mrq->cmd->flags & MMC_RSP_R1B) {
			resp_type = 0x3;
			timing = 0x0;
		} else {
			resp_type = 0x2;
			timing = 0x1;
		}
	}

	task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
	memset(task_desc, 0, cq_host->task_desc_len);
	data |= (CQHCI_VALID(1) |
		 CQHCI_END(1) |
		 CQHCI_INT(1) |
		 CQHCI_QBAR(1) |
		 CQHCI_ACT(0x5) |
		 CQHCI_CMD_INDEX(mrq->cmd->opcode) |
		 CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type));
	if (cq_host->ops->update_dcmd_desc)
		cq_host->ops->update_dcmd_desc(mmc, mrq, &data);
	*task_desc |= data;
	desc = (u8 *)task_desc;
	pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n",
		 mmc_hostname(mmc), mrq->cmd->opcode, timing, resp_type);
	dataddr = (__le64 __force *)(desc + 4);
	dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);
}

static void cqhci_post_req(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (data) {
		dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len,
			     (data->flags & MMC_DATA_READ) ?
			     DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static inline int cqhci_tag(struct mmc_request *mrq)
{
	return mrq->cmd ? DCMD_SLOT : mrq->tag;
}
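
/*
 * Example: a DCMD request (mrq->cmd set, no block data) always lands in
 * slot 31 (DCMD_SLOT), so data requests are limited to tags 0..30 when
 * MMC_CAP2_CQE_DCMD is in use (see the cqe_qdepth adjustment in
 * cqhci_init()).
 */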

static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	int err = 0;
	int tag = cqhci_tag(mrq);
	struct cqhci_host *cq_host = mmc->cqe_private;
	unsigned long flags;

	if (!cq_host->enabled) {
		pr_err("%s: cqhci: not enabled\n", mmc_hostname(mmc));
		return -EINVAL;
	}

	/* First request after resume has to re-enable */
	if (!cq_host->activated)
		__cqhci_enable(cq_host);

	if (!mmc->cqe_on) {
		if (cq_host->ops->pre_enable)
			cq_host->ops->pre_enable(mmc);

		cqhci_writel(cq_host, 0, CQHCI_CTL);
		mmc->cqe_on = true;
		pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
		if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
			pr_err("%s: cqhci: CQE failed to exit halt state\n",
			       mmc_hostname(mmc));
		}
		if (cq_host->ops->enable)
			cq_host->ops->enable(mmc);
	}

	if (mrq->data) {
		cqhci_prep_task_desc(mrq, cq_host, tag);

		err = cqhci_prep_tran_desc(mrq, cq_host, tag);
		if (err) {
			pr_err("%s: cqhci: failed to setup tx desc: %d\n",
			       mmc_hostname(mmc), err);
			return err;
		}
	} else {
		cqhci_prep_dcmd_desc(mmc, mrq);
	}

	spin_lock_irqsave(&cq_host->lock, flags);

	if (cq_host->recovery_halt) {
		err = -EBUSY;
		goto out_unlock;
	}

	cq_host->slot[tag].mrq = mrq;
	cq_host->slot[tag].flags = 0;

	cq_host->qcnt += 1;
	/* Make sure descriptors are ready before ringing the doorbell */
	wmb();
	cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
	if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
		pr_debug("%s: cqhci: doorbell not set for tag %d\n",
			 mmc_hostname(mmc), tag);
out_unlock:
	spin_unlock_irqrestore(&cq_host->lock, flags);

	if (err)
		cqhci_post_req(mmc, mrq);

	return err;
}

static void cqhci_recovery_needed(struct mmc_host *mmc, struct mmc_request *mrq,
				  bool notify)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (!cq_host->recovery_halt) {
		cq_host->recovery_halt = true;
		pr_debug("%s: cqhci: recovery needed\n", mmc_hostname(mmc));
		wake_up(&cq_host->wait_queue);
		if (notify && mrq->recovery_notifier)
			mrq->recovery_notifier(mrq);
	}
}

static unsigned int cqhci_error_flags(int error1, int error2)
{
	int error = error1 ? error1 : error2;

	switch (error) {
	case -EILSEQ:
		return CQHCI_HOST_CRC;
	case -ETIMEDOUT:
		return CQHCI_HOST_TIMEOUT;
	default:
		return CQHCI_HOST_OTHER;
	}
}
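
/*
 * Example: a host CRC error (-EILSEQ) on the command line maps to
 * CQHCI_HOST_CRC here, which cqhci_error_from_flags() later turns back
 * into -EILSEQ so the upper layers can consider re-tuning.
 */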

static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error,
			    int data_error)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct cqhci_slot *slot;
	u32 terri;
	u32 tdpe;
	int tag;

	spin_lock(&cq_host->lock);

	terri = cqhci_readl(cq_host, CQHCI_TERRI);

	pr_debug("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
		 mmc_hostname(mmc), status, cmd_error, data_error, terri);

	/* Forget about errors when recovery has already been triggered */
	if (cq_host->recovery_halt)
		goto out_unlock;

	if (!cq_host->qcnt) {
		WARN_ONCE(1, "%s: cqhci: error when idle. IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
			  mmc_hostname(mmc), status, cmd_error, data_error,
			  terri);
		goto out_unlock;
	}

	if (CQHCI_TERRI_C_VALID(terri)) {
		tag = CQHCI_TERRI_C_TASK(terri);
		slot = &cq_host->slot[tag];
		if (slot->mrq) {
			slot->flags = cqhci_error_flags(cmd_error, data_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
		}
	}

	if (CQHCI_TERRI_D_VALID(terri)) {
		tag = CQHCI_TERRI_D_TASK(terri);
		slot = &cq_host->slot[tag];
		if (slot->mrq) {
			slot->flags = cqhci_error_flags(data_error, cmd_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
		}
	}

	/*
	 * Handle ICCE ("Invalid Crypto Configuration Error"). This should
	 * never happen, since the block layer ensures that all crypto-enabled
	 * I/O requests have a valid keyslot before they reach the driver.
	 *
	 * Note that GCE ("General Crypto Error") is different; it already got
	 * handled above by checking TERRI.
	 */
	if (status & CQHCI_IS_ICCE) {
		tdpe = cqhci_readl(cq_host, CQHCI_TDPE);
		WARN_ONCE(1,
			  "%s: cqhci: invalid crypto configuration error. IRQ status: 0x%08x TDPE: 0x%08x\n",
			  mmc_hostname(mmc), status, tdpe);
		while (tdpe != 0) {
			tag = __ffs(tdpe);
			tdpe &= ~(1 << tag);
			slot = &cq_host->slot[tag];
			if (!slot->mrq)
				continue;
			slot->flags = cqhci_error_flags(data_error, cmd_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
		}
	}

	if (!cq_host->recovery_halt) {
		/*
		 * The only way to guarantee forward progress is to mark at
		 * least one task in error, so if none is indicated, pick one.
		 */
		for (tag = 0; tag < NUM_SLOTS; tag++) {
			slot = &cq_host->slot[tag];
			if (!slot->mrq)
				continue;
			slot->flags = cqhci_error_flags(data_error, cmd_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
			break;
		}
	}

out_unlock:
	spin_unlock(&cq_host->lock);
}

static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct cqhci_slot *slot = &cq_host->slot[tag];
	struct mmc_request *mrq = slot->mrq;
	struct mmc_data *data;

	if (!mrq) {
		WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n",
			  mmc_hostname(mmc), tag);
		return;
	}

	/* No completions allowed during recovery */
	if (cq_host->recovery_halt) {
		slot->flags |= CQHCI_COMPLETED;
		return;
	}

	slot->mrq = NULL;

	cq_host->qcnt -= 1;

	data = mrq->data;
	if (data) {
		if (data->error)
			data->bytes_xfered = 0;
		else
			data->bytes_xfered = data->blksz * data->blocks;
	}

	mmc_cqe_request_done(mmc, mrq);
}

irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
		      int data_error)
{
	u32 status;
	unsigned long tag = 0, comp_status;
	struct cqhci_host *cq_host = mmc->cqe_private;

	status = cqhci_readl(cq_host, CQHCI_IS);
	cqhci_writel(cq_host, status, CQHCI_IS);

	pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);

	if ((status & (CQHCI_IS_RED | CQHCI_IS_GCE | CQHCI_IS_ICCE)) ||
	    cmd_error || data_error)
		cqhci_error_irq(mmc, status, cmd_error, data_error);

	if (status & CQHCI_IS_TCC) {
		/* read TCN and complete the request */
		comp_status = cqhci_readl(cq_host, CQHCI_TCN);
		cqhci_writel(cq_host, comp_status, CQHCI_TCN);
		pr_debug("%s: cqhci: TCN: 0x%08lx\n",
			 mmc_hostname(mmc), comp_status);

		spin_lock(&cq_host->lock);

		for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
			/* complete the corresponding mrq */
			pr_debug("%s: cqhci: completing tag %lu\n",
				 mmc_hostname(mmc), tag);
			cqhci_finish_mrq(mmc, tag);
		}

		if (cq_host->waiting_for_idle && !cq_host->qcnt) {
			cq_host->waiting_for_idle = false;
			wake_up(&cq_host->wait_queue);
		}

		spin_unlock(&cq_host->lock);
	}

	if (status & CQHCI_IS_TCL)
		wake_up(&cq_host->wait_queue);

	if (status & CQHCI_IS_HAC)
		wake_up(&cq_host->wait_queue);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(cqhci_irq);

static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret)
{
	unsigned long flags;
	bool is_idle;

	spin_lock_irqsave(&cq_host->lock, flags);
	is_idle = !cq_host->qcnt || cq_host->recovery_halt;
	*ret = cq_host->recovery_halt ? -EBUSY : 0;
	cq_host->waiting_for_idle = !is_idle;
	spin_unlock_irqrestore(&cq_host->lock, flags);

	return is_idle;
}

static int cqhci_wait_for_idle(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int ret;

	wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret));

	return ret;
}

static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
			  bool *recovery_needed)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int tag = cqhci_tag(mrq);
	struct cqhci_slot *slot = &cq_host->slot[tag];
	unsigned long flags;
	bool timed_out;

	spin_lock_irqsave(&cq_host->lock, flags);
	timed_out = slot->mrq == mrq;
	if (timed_out) {
		slot->flags |= CQHCI_EXTERNAL_TIMEOUT;
		cqhci_recovery_needed(mmc, mrq, false);
		*recovery_needed = cq_host->recovery_halt;
	}
	spin_unlock_irqrestore(&cq_host->lock, flags);

	if (timed_out) {
		pr_err("%s: cqhci: timeout for tag %d\n",
		       mmc_hostname(mmc), tag);
		cqhci_dumpregs(cq_host);
	}

	return timed_out;
}

static bool cqhci_tasks_cleared(struct cqhci_host *cq_host)
{
	return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS);
}

static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	bool ret;
	u32 ctl;

	cqhci_set_irqs(cq_host, CQHCI_IS_TCL);

	ctl = cqhci_readl(cq_host, CQHCI_CTL);
	ctl |= CQHCI_CLEAR_ALL_TASKS;
	cqhci_writel(cq_host, ctl, CQHCI_CTL);

	wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host),
			   msecs_to_jiffies(timeout) + 1);

	cqhci_set_irqs(cq_host, 0);

	ret = cqhci_tasks_cleared(cq_host);

	if (!ret)
		pr_debug("%s: cqhci: Failed to clear tasks\n",
			 mmc_hostname(mmc));

	return ret;
}

static bool cqhci_halted(struct cqhci_host *cq_host)
{
	return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
}

static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	bool ret;
	u32 ctl;

	if (cqhci_halted(cq_host))
		return true;

	cqhci_set_irqs(cq_host, CQHCI_IS_HAC);

	ctl = cqhci_readl(cq_host, CQHCI_CTL);
	ctl |= CQHCI_HALT;
	cqhci_writel(cq_host, ctl, CQHCI_CTL);

	wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host),
			   msecs_to_jiffies(timeout) + 1);

	cqhci_set_irqs(cq_host, 0);

	ret = cqhci_halted(cq_host);

	if (!ret)
		pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));

	return ret;
}

/*
 * After halting we expect to be able to use the command line. We interpret the
 * failure to halt to mean the data lines might still be in use (and the upper
 * layers will need to send a STOP command), so we set the timeout based on a
 * generous command timeout.
 */
#define CQHCI_START_HALT_TIMEOUT	5

static void cqhci_recovery_start(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

	WARN_ON(!cq_host->recovery_halt);

	cqhci_halt(mmc, CQHCI_START_HALT_TIMEOUT);

	if (cq_host->ops->disable)
		cq_host->ops->disable(mmc, true);

	mmc->cqe_on = false;
}

static int cqhci_error_from_flags(unsigned int flags)
{
	if (!flags)
		return 0;

	/* CRC errors might indicate re-tuning so prefer to report that */
	if (flags & CQHCI_HOST_CRC)
		return -EILSEQ;

	if (flags & (CQHCI_EXTERNAL_TIMEOUT | CQHCI_HOST_TIMEOUT))
		return -ETIMEDOUT;

	return -EIO;
}

static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag)
{
	struct cqhci_slot *slot = &cq_host->slot[tag];
	struct mmc_request *mrq = slot->mrq;
	struct mmc_data *data;

	if (!mrq)
		return;

	slot->mrq = NULL;

	cq_host->qcnt -= 1;

	data = mrq->data;
	if (data) {
		data->bytes_xfered = 0;
		data->error = cqhci_error_from_flags(slot->flags);
	} else {
		mrq->cmd->error = cqhci_error_from_flags(slot->flags);
	}

	mmc_cqe_request_done(cq_host->mmc, mrq);
}

static void cqhci_recover_mrqs(struct cqhci_host *cq_host)
{
	int i;

	for (i = 0; i < cq_host->num_slots; i++)
		cqhci_recover_mrq(cq_host, i);
}

/*
 * By now the command and data lines should be unused so there is no reason for
 * CQHCI to take a long time to halt, but if it doesn't halt there could be
 * problems clearing tasks, so be generous.
 */
#define CQHCI_FINISH_HALT_TIMEOUT	20

/* CQHCI could be expected to clear its internal state pretty quickly */
#define CQHCI_CLEAR_TIMEOUT		20

static void cqhci_recovery_finish(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	unsigned long flags;
	u32 cqcfg;
	bool ok;

	pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

	WARN_ON(!cq_host->recovery_halt);

	ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);

	if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
		ok = false;

	/*
	 * The specification contradicts itself: it says tasks cannot be
	 * cleared if CQHCI does not halt, but also that if CQHCI does not
	 * halt it should be disabled/re-enabled, while warning not to
	 * disable it before clearing tasks. Have a go anyway.
	 */
	if (!ok) {
		pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
		cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
		cqcfg &= ~CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
		cqcfg |= CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
		/* Be sure that there are no tasks */
		ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
		if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
			ok = false;
		WARN_ON(!ok);
	}

	cqhci_recover_mrqs(cq_host);

	WARN_ON(cq_host->qcnt);

	spin_lock_irqsave(&cq_host->lock, flags);
	cq_host->qcnt = 0;
	cq_host->recovery_halt = false;
	mmc->cqe_on = false;
	spin_unlock_irqrestore(&cq_host->lock, flags);

	/* Ensure all writes are done before interrupts are re-enabled */
	wmb();

	cqhci_writel(cq_host, CQHCI_IS_HAC | CQHCI_IS_TCL, CQHCI_IS);

	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

	pr_debug("%s: cqhci: recovery done\n", mmc_hostname(mmc));
}

static const struct mmc_cqe_ops cqhci_cqe_ops = {
	.cqe_enable = cqhci_enable,
	.cqe_disable = cqhci_disable,
	.cqe_request = cqhci_request,
	.cqe_post_req = cqhci_post_req,
	.cqe_off = cqhci_off,
	.cqe_wait_for_idle = cqhci_wait_for_idle,
	.cqe_timeout = cqhci_timeout,
	.cqe_recovery_start = cqhci_recovery_start,
	.cqe_recovery_finish = cqhci_recovery_finish,
};

struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev)
{
	struct cqhci_host *cq_host;
	struct resource *cqhci_memres = NULL;

	/* check and setup CMDQ interface */
	cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						    "cqhci");
	if (!cqhci_memres) {
		dev_dbg(&pdev->dev, "CMDQ not supported\n");
		return ERR_PTR(-EINVAL);
	}

	cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
	if (!cq_host)
		return ERR_PTR(-ENOMEM);
	cq_host->mmio = devm_ioremap(&pdev->dev,
				     cqhci_memres->start,
				     resource_size(cqhci_memres));
	if (!cq_host->mmio) {
		dev_err(&pdev->dev, "failed to remap cqhci regs\n");
		return ERR_PTR(-EBUSY);
	}
	dev_dbg(&pdev->dev, "CMDQ ioremap: done\n");

	return cq_host;
}
EXPORT_SYMBOL(cqhci_pltfm_init);

static unsigned int cqhci_ver_major(struct cqhci_host *cq_host)
{
	return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER));
}

static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host)
{
	u32 ver = cqhci_readl(cq_host, CQHCI_VER);

	return CQHCI_VER_MINOR1(ver) * 10 + CQHCI_VER_MINOR2(ver);
}
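
/*
 * Example: a CQHCI_VER value of 0x0510 decodes via the helpers above as
 * major 5, minor 1 * 10 + 0 = 10, logged below as "CQHCI version 5.10"
 * (the eMMC 5.1 encoding).
 */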

int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
	       bool dma64)
{
	int err;

	cq_host->dma64 = dma64;
	cq_host->mmc = mmc;
	cq_host->mmc->cqe_private = cq_host;

	cq_host->num_slots = NUM_SLOTS;
	cq_host->dcmd_slot = DCMD_SLOT;

	mmc->cqe_ops = &cqhci_cqe_ops;

	mmc->cqe_qdepth = NUM_SLOTS;
	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
		mmc->cqe_qdepth -= 1;

	cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots,
				     sizeof(*cq_host->slot), GFP_KERNEL);
	if (!cq_host->slot) {
		err = -ENOMEM;
		goto out_err;
	}

	err = cqhci_crypto_init(cq_host);
	if (err) {
		pr_err("%s: CQHCI crypto initialization failed\n",
		       mmc_hostname(mmc));
		goto out_err;
	}

	spin_lock_init(&cq_host->lock);

	init_completion(&cq_host->halt_comp);
	init_waitqueue_head(&cq_host->wait_queue);

	pr_info("%s: CQHCI version %u.%02u\n",
		mmc_hostname(mmc), cqhci_ver_major(cq_host),
		cqhci_ver_minor(cq_host));

	return 0;

out_err:
	pr_err("%s: CQHCI version %u.%02u failed to initialize, error %d\n",
	       mmc_hostname(mmc), cqhci_ver_major(cq_host),
	       cqhci_ver_minor(cq_host), err);
	return err;
}
EXPORT_SYMBOL(cqhci_init);

MODULE_AUTHOR("Venkat Gopalakrishnan <venkatg@codeaurora.org>");
MODULE_DESCRIPTION("Command Queue Host Controller Interface driver");
MODULE_LICENSE("GPL v2");