/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/platform_device.h>
#include <linux/ktime.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

#include "cqhci.h"

#define DCMD_SLOT 31
#define NUM_SLOTS 32

struct cqhci_slot {
	struct mmc_request *mrq;
	unsigned int flags;
#define CQHCI_EXTERNAL_TIMEOUT	BIT(0)
#define CQHCI_COMPLETED		BIT(1)
#define CQHCI_HOST_CRC		BIT(2)
#define CQHCI_HOST_TIMEOUT	BIT(3)
#define CQHCI_HOST_OTHER	BIT(4)
};

static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->desc_base + (tag * cq_host->slot_sz);
}

static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
{
	u8 *desc = get_desc(cq_host, tag);

	return desc + cq_host->task_desc_len;
}

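/*
 * Each tag owns a contiguous chunk of max_segs transfer descriptors in
 * the transfer descriptor table, so the DMA and CPU addresses below are
 * computed with the same max_segs * trans_desc_len stride.
 */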
static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->trans_desc_dma_base +
		(cq_host->mmc->max_segs * tag *
		 cq_host->trans_desc_len);
}

static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->trans_desc_base +
		(cq_host->trans_desc_len * cq_host->mmc->max_segs * tag);
}

static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
	u8 *link_temp;
	dma_addr_t trans_temp;

	link_temp = get_link_desc(cq_host, tag);
	trans_temp = get_trans_desc_dma(cq_host, tag);

	memset(link_temp, 0, cq_host->link_desc_len);
	if (cq_host->link_desc_len > 8)
		*(link_temp + 8) = 0;

	if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) {
		*link_temp = CQHCI_VALID(0) | CQHCI_ACT(0) | CQHCI_END(1);
		return;
	}

	*link_temp = CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0);

	if (cq_host->dma64) {
		__le64 *data_addr = (__le64 __force *)(link_temp + 4);

		data_addr[0] = cpu_to_le64(trans_temp);
	} else {
		__le32 *data_addr = (__le32 __force *)(link_temp + 4);

		data_addr[0] = cpu_to_le32(trans_temp);
	}
}

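/*
 * CQHCI pairs two interrupt enable registers: ISTE latches events into
 * the interrupt status register, while ISGE additionally signals them
 * to the CPU. This driver always programs both with the same mask.
 */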
static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
{
	cqhci_writel(cq_host, set, CQHCI_ISTE);
	cqhci_writel(cq_host, set, CQHCI_ISGE);
}

#define DRV_NAME "cqhci"

#define CQHCI_DUMP(f, x...) \
	pr_err("%s: " DRV_NAME ": " f, mmc_hostname(mmc), ## x)

static void cqhci_dumpregs(struct cqhci_host *cq_host)
{
	struct mmc_host *mmc = cq_host->mmc;

	CQHCI_DUMP("============ CQHCI REGISTER DUMP ===========\n");

	CQHCI_DUMP("Caps: 0x%08x | Version: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CAP),
		   cqhci_readl(cq_host, CQHCI_VER));
	CQHCI_DUMP("Config: 0x%08x | Control: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CFG),
		   cqhci_readl(cq_host, CQHCI_CTL));
	CQHCI_DUMP("Int stat: 0x%08x | Int enab: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_IS),
		   cqhci_readl(cq_host, CQHCI_ISTE));
	CQHCI_DUMP("Int sig: 0x%08x | Int Coal: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_ISGE),
		   cqhci_readl(cq_host, CQHCI_IC));
	CQHCI_DUMP("TDL base: 0x%08x | TDL up32: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TDLBA),
		   cqhci_readl(cq_host, CQHCI_TDLBAU));
	CQHCI_DUMP("Doorbell: 0x%08x | TCN: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TDBR),
		   cqhci_readl(cq_host, CQHCI_TCN));
	CQHCI_DUMP("Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_DQS),
		   cqhci_readl(cq_host, CQHCI_DPT));
	CQHCI_DUMP("Task clr: 0x%08x | SSC1: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TCLR),
		   cqhci_readl(cq_host, CQHCI_SSC1));
	CQHCI_DUMP("SSC2: 0x%08x | DCMD rsp: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_SSC2),
		   cqhci_readl(cq_host, CQHCI_CRDCT));
	CQHCI_DUMP("RED mask: 0x%08x | TERRI: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_RMEM),
		   cqhci_readl(cq_host, CQHCI_TERRI));
	CQHCI_DUMP("Resp idx: 0x%08x | Resp arg: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CRI),
		   cqhci_readl(cq_host, CQHCI_CRA));

	if (cq_host->ops->dumpregs)
		cq_host->ops->dumpregs(mmc);
	else
		CQHCI_DUMP(": ===========================================\n");
}

/*
 * The allocated descriptor table for task, link & transfer descriptors
 * looks like:
 * |----------|
 * |task desc |  |->|----------|
 * |----------|  |  |trans desc|
 * |link desc-|->|  |----------|
 * |----------|  .
 *      .        .
 *  no. of slots      max-segs
 *      .           |----------|
 * |----------|
 * The idea here is to create the [task+trans] table and mark & point the
 * link desc to the transfer desc table on a per slot basis.
 */

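/*
 * As a worked example (assuming the defaults above): with 64-bit DMA
 * and 64-bit task descriptors, slot_sz = 8 (task) + 16 (link) = 24
 * bytes, desc_size = 24 * 32 slots = 768 bytes, and each slot owns
 * max_segs 16-byte transfer descriptors in the transfer table.
 */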
static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
{
	int i = 0;

	/* task descriptor can be 64/128 bit irrespective of arch */
	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
		cqhci_writel(cq_host, cqhci_readl(cq_host, CQHCI_CFG) |
			     CQHCI_TASK_DESC_SZ, CQHCI_CFG);
		cq_host->task_desc_len = 16;
	} else {
		cq_host->task_desc_len = 8;
	}

	/*
	 * 96 bits length of transfer desc instead of 128 bits which means
	 * ADMA would expect next valid descriptor at the 96th bit
	 * or 12th byte
	 */
	if (cq_host->dma64) {
		if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ)
			cq_host->trans_desc_len = 12;
		else
			cq_host->trans_desc_len = 16;
		cq_host->link_desc_len = 16;
	} else {
		cq_host->trans_desc_len = 8;
		cq_host->link_desc_len = 8;
	}

	/* total size of a slot: 1 task & 1 transfer (link) */
	cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;

	cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;

	cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
		cq_host->mmc->cqe_qdepth;

	pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
		 mmc_hostname(cq_host->mmc), cq_host->desc_size,
		 cq_host->data_size, cq_host->slot_sz);

	/*
	 * allocate a dma-mapped chunk of memory for the descriptors
	 * allocate a dma-mapped chunk of memory for link descriptors
	 * setup each link-desc memory offset per slot-number to
	 * the descriptor table.
	 */
	cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
						 cq_host->desc_size,
						 &cq_host->desc_dma_base,
						 GFP_KERNEL);
	if (!cq_host->desc_base)
		return -ENOMEM;

	cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
						       cq_host->data_size,
						       &cq_host->trans_desc_dma_base,
						       GFP_KERNEL);
	if (!cq_host->trans_desc_base) {
		dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
				   cq_host->desc_base,
				   cq_host->desc_dma_base);
		cq_host->desc_base = NULL;
		cq_host->desc_dma_base = 0;
		return -ENOMEM;
	}

	pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
		 mmc_hostname(cq_host->mmc), cq_host->desc_base,
		 cq_host->trans_desc_base,
		 (unsigned long long)cq_host->desc_dma_base,
		 (unsigned long long)cq_host->trans_desc_dma_base);

	for (; i < (cq_host->num_slots); i++)
		setup_trans_desc(cq_host, i);

	return 0;
}

static void __cqhci_enable(struct cqhci_host *cq_host)
{
	struct mmc_host *mmc = cq_host->mmc;
	u32 cqcfg;

	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);

	/* Configuration must not be changed while enabled */
	if (cqcfg & CQHCI_ENABLE) {
		cqcfg &= ~CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
	}

	cqcfg &= ~(CQHCI_DCMD | CQHCI_TASK_DESC_SZ);

	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
		cqcfg |= CQHCI_DCMD;

	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128)
		cqcfg |= CQHCI_TASK_DESC_SZ;

	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base),
		     CQHCI_TDLBA);
	cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base),
		     CQHCI_TDLBAU);

	cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2);

	cqhci_set_irqs(cq_host, 0);

	cqcfg |= CQHCI_ENABLE;

	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	mmc->cqe_on = true;

	if (cq_host->ops->enable)
		cq_host->ops->enable(mmc);

	/* Ensure all writes are done before interrupts are enabled */
	wmb();

	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

	cq_host->activated = true;
}

static void __cqhci_disable(struct cqhci_host *cq_host)
{
	u32 cqcfg;

	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
	cqcfg &= ~CQHCI_ENABLE;
	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	cq_host->mmc->cqe_on = false;

	cq_host->activated = false;
}

int cqhci_suspend(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (cq_host->enabled)
		__cqhci_disable(cq_host);

	return 0;
}
EXPORT_SYMBOL(cqhci_suspend);

int cqhci_resume(struct mmc_host *mmc)
{
	/* Re-enable is done upon first request */
	return 0;
}
EXPORT_SYMBOL(cqhci_resume);

static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int err;

	if (cq_host->enabled)
		return 0;

	cq_host->rca = card->rca;

	err = cqhci_host_alloc_tdl(cq_host);
	if (err)
		return err;

	__cqhci_enable(cq_host);

	cq_host->enabled = true;

#ifdef DEBUG
	cqhci_dumpregs(cq_host);
#endif
	return 0;
}

/* CQHCI is idle and should halt immediately, so set a small timeout (us) */
#define CQHCI_OFF_TIMEOUT 100

static void cqhci_off(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	ktime_t timeout;
	bool timed_out;
	u32 reg;

	if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
		return;

	if (cq_host->ops->disable)
		cq_host->ops->disable(mmc, false);

	cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);

	timeout = ktime_add_us(ktime_get(), CQHCI_OFF_TIMEOUT);
	while (1) {
		timed_out = ktime_compare(ktime_get(), timeout) > 0;
		reg = cqhci_readl(cq_host, CQHCI_CTL);
		if ((reg & CQHCI_HALT) || timed_out)
			break;
	}

	if (timed_out)
		pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
	else
		pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));

	mmc->cqe_on = false;
}

static void cqhci_disable(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (!cq_host->enabled)
		return;

	cqhci_off(mmc);

	__cqhci_disable(cq_host);

	dmam_free_coherent(mmc_dev(mmc), cq_host->data_size,
			   cq_host->trans_desc_base,
			   cq_host->trans_desc_dma_base);

	dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size,
			   cq_host->desc_base,
			   cq_host->desc_dma_base);

	cq_host->trans_desc_base = NULL;
	cq_host->desc_base = NULL;

	cq_host->enabled = false;
}

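/*
 * A data task descriptor is a single 64-bit word: VALID/END/INT/ACT
 * and the per-request attribute flags in the low bits, with the block
 * count and block address packed into the upper bits.
 */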
static void cqhci_prep_task_desc(struct mmc_request *mrq,
				 u64 *data, bool intr)
{
	u32 req_flags = mrq->data->flags;

	*data = CQHCI_VALID(1) |
		CQHCI_END(1) |
		CQHCI_INT(intr) |
		CQHCI_ACT(0x5) |
		CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) |
		CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) |
		CQHCI_DATA_DIR(!!(req_flags & MMC_DATA_READ)) |
		CQHCI_PRIORITY(!!(req_flags & MMC_DATA_PRIO)) |
		CQHCI_QBAR(!!(req_flags & MMC_DATA_QBR)) |
		CQHCI_REL_WRITE(!!(req_flags & MMC_DATA_REL_WR)) |
		CQHCI_BLK_COUNT(mrq->data->blocks) |
		CQHCI_BLK_ADDR((u64)mrq->data->blk_addr);

	pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx\n",
		 mmc_hostname(mrq->host), mrq->tag, (unsigned long long)*data);
}

static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq)
{
	int sg_count;
	struct mmc_data *data = mrq->data;

	if (!data)
		return -EINVAL;

	sg_count = dma_map_sg(mmc_dev(host), data->sg,
			      data->sg_len,
			      (data->flags & MMC_DATA_WRITE) ?
			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!sg_count) {
		pr_err("%s: sg-len: %d\n", __func__, data->sg_len);
		return -ENOMEM;
	}

	return sg_count;
}

static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
				bool dma64)
{
	__le32 *attr = (__le32 __force *)desc;

	*attr = (CQHCI_VALID(1) |
		 CQHCI_END(end ? 1 : 0) |
		 CQHCI_INT(0) |
		 CQHCI_ACT(0x4) |
		 CQHCI_DAT_LENGTH(len));

	if (dma64) {
		__le64 *dataddr = (__le64 __force *)(desc + 4);

		dataddr[0] = cpu_to_le64(addr);
	} else {
		__le32 *dataddr = (__le32 __force *)(desc + 4);

		dataddr[0] = cpu_to_le32(addr);
	}
}

static int cqhci_prep_tran_desc(struct mmc_request *mrq,
				struct cqhci_host *cq_host, int tag)
{
	struct mmc_data *data = mrq->data;
	int i, sg_count, len;
	bool end = false;
	bool dma64 = cq_host->dma64;
	dma_addr_t addr;
	u8 *desc;
	struct scatterlist *sg;

	sg_count = cqhci_dma_map(mrq->host, mrq);
	if (sg_count < 0) {
		pr_err("%s: %s: unable to map sg lists, %d\n",
		       mmc_hostname(mrq->host), __func__, sg_count);
		return sg_count;
	}

	desc = get_trans_desc(cq_host, tag);

	for_each_sg(data->sg, sg, sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((i+1) == sg_count)
			end = true;
		cqhci_set_tran_desc(desc, addr, len, end, dma64);
		desc += cq_host->trans_desc_len;
	}

	return 0;
}

static void cqhci_prep_dcmd_desc(struct mmc_host *mmc,
				 struct mmc_request *mrq)
{
	u64 *task_desc = NULL;
	u64 data = 0;
	u8 resp_type;
	u8 *desc;
	__le64 *dataddr;
	struct cqhci_host *cq_host = mmc->cqe_private;
	u8 timing;

	if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) {
		resp_type = 0x0;
		timing = 0x1;
	} else {
		if (mrq->cmd->flags & MMC_RSP_R1B) {
			resp_type = 0x3;
			timing = 0x0;
		} else {
			resp_type = 0x2;
			timing = 0x1;
		}
	}

	task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
	memset(task_desc, 0, cq_host->task_desc_len);
	data |= (CQHCI_VALID(1) |
		 CQHCI_END(1) |
		 CQHCI_INT(1) |
		 CQHCI_QBAR(1) |
		 CQHCI_ACT(0x5) |
		 CQHCI_CMD_INDEX(mrq->cmd->opcode) |
		 CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type));
	if (cq_host->ops->update_dcmd_desc)
		cq_host->ops->update_dcmd_desc(mmc, mrq, &data);
	*task_desc |= data;
	desc = (u8 *)task_desc;
	pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n",
		 mmc_hostname(mmc), mrq->cmd->opcode, timing, resp_type);
	dataddr = (__le64 __force *)(desc + 4);
	dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);
}

static void cqhci_post_req(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (data) {
		dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len,
			     (data->flags & MMC_DATA_READ) ?
			     DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

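/*
 * DCMD requests carry mrq->cmd and always use the reserved DCMD slot;
 * data requests land in the slot matching their block layer tag.
 */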
static inline int cqhci_tag(struct mmc_request *mrq)
{
	return mrq->cmd ? DCMD_SLOT : mrq->tag;
}

static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	int err = 0;
	u64 data = 0;
	u64 *task_desc = NULL;
	int tag = cqhci_tag(mrq);
	struct cqhci_host *cq_host = mmc->cqe_private;
	unsigned long flags;

	if (!cq_host->enabled) {
		pr_err("%s: cqhci: not enabled\n", mmc_hostname(mmc));
		return -EINVAL;
	}

	/* First request after resume has to re-enable */
	if (!cq_host->activated)
		__cqhci_enable(cq_host);

	if (!mmc->cqe_on) {
		cqhci_writel(cq_host, 0, CQHCI_CTL);
		mmc->cqe_on = true;
		pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
		if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
			pr_err("%s: cqhci: CQE failed to exit halt state\n",
			       mmc_hostname(mmc));
		}
		if (cq_host->ops->enable)
			cq_host->ops->enable(mmc);
	}

	if (mrq->data) {
		task_desc = (__le64 __force *)get_desc(cq_host, tag);
		cqhci_prep_task_desc(mrq, &data, 1);
		*task_desc = cpu_to_le64(data);
		err = cqhci_prep_tran_desc(mrq, cq_host, tag);
		if (err) {
			pr_err("%s: cqhci: failed to setup tx desc: %d\n",
			       mmc_hostname(mmc), err);
			return err;
		}
	} else {
		cqhci_prep_dcmd_desc(mmc, mrq);
	}

	spin_lock_irqsave(&cq_host->lock, flags);

	if (cq_host->recovery_halt) {
		err = -EBUSY;
		goto out_unlock;
	}

	cq_host->slot[tag].mrq = mrq;
	cq_host->slot[tag].flags = 0;

	cq_host->qcnt += 1;

	cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
	if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
		pr_debug("%s: cqhci: doorbell not set for tag %d\n",
			 mmc_hostname(mmc), tag);
out_unlock:
	spin_unlock_irqrestore(&cq_host->lock, flags);

	if (err)
		cqhci_post_req(mmc, mrq);

	return err;
}

static void cqhci_recovery_needed(struct mmc_host *mmc, struct mmc_request *mrq,
				  bool notify)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (!cq_host->recovery_halt) {
		cq_host->recovery_halt = true;
		pr_debug("%s: cqhci: recovery needed\n", mmc_hostname(mmc));
		wake_up(&cq_host->wait_queue);
		if (notify && mrq->recovery_notifier)
			mrq->recovery_notifier(mrq);
	}
}

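/*
 * error1 takes precedence, so callers pass the error most relevant to
 * the failing task first (cmd_error for command errors, data_error for
 * data errors).
 */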
static unsigned int cqhci_error_flags(int error1, int error2)
{
	int error = error1 ? error1 : error2;

	switch (error) {
	case -EILSEQ:
		return CQHCI_HOST_CRC;
	case -ETIMEDOUT:
		return CQHCI_HOST_TIMEOUT;
	default:
		return CQHCI_HOST_OTHER;
	}
}

static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error,
			    int data_error)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct cqhci_slot *slot;
	u32 terri;
	int tag;

	spin_lock(&cq_host->lock);

	terri = cqhci_readl(cq_host, CQHCI_TERRI);

	pr_debug("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
		 mmc_hostname(mmc), status, cmd_error, data_error, terri);

	/* Forget about errors when recovery has already been triggered */
	if (cq_host->recovery_halt)
		goto out_unlock;

	if (!cq_host->qcnt) {
		WARN_ONCE(1, "%s: cqhci: error when idle. IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
			  mmc_hostname(mmc), status, cmd_error, data_error,
			  terri);
		goto out_unlock;
	}

	if (CQHCI_TERRI_C_VALID(terri)) {
		tag = CQHCI_TERRI_C_TASK(terri);
		slot = &cq_host->slot[tag];
		if (slot->mrq) {
			slot->flags = cqhci_error_flags(cmd_error, data_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
		}
	}

	if (CQHCI_TERRI_D_VALID(terri)) {
		tag = CQHCI_TERRI_D_TASK(terri);
		slot = &cq_host->slot[tag];
		if (slot->mrq) {
			slot->flags = cqhci_error_flags(data_error, cmd_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
		}
	}

	if (!cq_host->recovery_halt) {
		/*
		 * The only way to guarantee forward progress is to mark at
		 * least one task in error, so if none is indicated, pick one.
		 */
		for (tag = 0; tag < NUM_SLOTS; tag++) {
			slot = &cq_host->slot[tag];
			if (!slot->mrq)
				continue;
			slot->flags = cqhci_error_flags(data_error, cmd_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
			break;
		}
	}

out_unlock:
	spin_unlock(&cq_host->lock);
}

static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct cqhci_slot *slot = &cq_host->slot[tag];
	struct mmc_request *mrq = slot->mrq;
	struct mmc_data *data;

	if (!mrq) {
		WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n",
			  mmc_hostname(mmc), tag);
		return;
	}

	/* No completions allowed during recovery */
	if (cq_host->recovery_halt) {
		slot->flags |= CQHCI_COMPLETED;
		return;
	}

	slot->mrq = NULL;

	cq_host->qcnt -= 1;

	data = mrq->data;
	if (data) {
		if (data->error)
			data->bytes_xfered = 0;
		else
			data->bytes_xfered = data->blksz * data->blocks;
	}

	mmc_cqe_request_done(mmc, mrq);
}

irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
		      int data_error)
{
	u32 status;
	unsigned long tag = 0, comp_status;
	struct cqhci_host *cq_host = mmc->cqe_private;

	status = cqhci_readl(cq_host, CQHCI_IS);
	cqhci_writel(cq_host, status, CQHCI_IS);

	pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);

	if ((status & CQHCI_IS_RED) || cmd_error || data_error)
		cqhci_error_irq(mmc, status, cmd_error, data_error);

	if (status & CQHCI_IS_TCC) {
		/* read TCN and complete the request */
		comp_status = cqhci_readl(cq_host, CQHCI_TCN);
		cqhci_writel(cq_host, comp_status, CQHCI_TCN);
		pr_debug("%s: cqhci: TCN: 0x%08lx\n",
			 mmc_hostname(mmc), comp_status);

		spin_lock(&cq_host->lock);

		for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
			/* complete the corresponding mrq */
			pr_debug("%s: cqhci: completing tag %lu\n",
				 mmc_hostname(mmc), tag);
			cqhci_finish_mrq(mmc, tag);
		}

		if (cq_host->waiting_for_idle && !cq_host->qcnt) {
			cq_host->waiting_for_idle = false;
			wake_up(&cq_host->wait_queue);
		}

		spin_unlock(&cq_host->lock);
	}

	if (status & CQHCI_IS_TCL)
		wake_up(&cq_host->wait_queue);

	if (status & CQHCI_IS_HAC)
		wake_up(&cq_host->wait_queue);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(cqhci_irq);

static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret)
{
	unsigned long flags;
	bool is_idle;

	spin_lock_irqsave(&cq_host->lock, flags);
	is_idle = !cq_host->qcnt || cq_host->recovery_halt;
	*ret = cq_host->recovery_halt ? -EBUSY : 0;
	cq_host->waiting_for_idle = !is_idle;
	spin_unlock_irqrestore(&cq_host->lock, flags);

	return is_idle;
}

static int cqhci_wait_for_idle(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int ret;

	wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret));

	return ret;
}

static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
			  bool *recovery_needed)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int tag = cqhci_tag(mrq);
	struct cqhci_slot *slot = &cq_host->slot[tag];
	unsigned long flags;
	bool timed_out;

	spin_lock_irqsave(&cq_host->lock, flags);
	timed_out = slot->mrq == mrq;
	if (timed_out) {
		slot->flags |= CQHCI_EXTERNAL_TIMEOUT;
		cqhci_recovery_needed(mmc, mrq, false);
		*recovery_needed = cq_host->recovery_halt;
	}
	spin_unlock_irqrestore(&cq_host->lock, flags);

	if (timed_out) {
		pr_err("%s: cqhci: timeout for tag %d\n",
		       mmc_hostname(mmc), tag);
		cqhci_dumpregs(cq_host);
	}

	return timed_out;
}

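/*
 * CLEAR_ALL_TASKS is cleared by the controller when the clear operation
 * completes, so reading it back as zero indicates the tasks are gone.
 */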
static bool cqhci_tasks_cleared(struct cqhci_host *cq_host)
{
	return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS);
}

static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	bool ret;
	u32 ctl;

	cqhci_set_irqs(cq_host, CQHCI_IS_TCL);

	ctl = cqhci_readl(cq_host, CQHCI_CTL);
	ctl |= CQHCI_CLEAR_ALL_TASKS;
	cqhci_writel(cq_host, ctl, CQHCI_CTL);

	wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host),
			   msecs_to_jiffies(timeout) + 1);

	cqhci_set_irqs(cq_host, 0);

	ret = cqhci_tasks_cleared(cq_host);

	if (!ret)
		pr_debug("%s: cqhci: Failed to clear tasks\n",
			 mmc_hostname(mmc));

	return ret;
}

static bool cqhci_halted(struct cqhci_host *cq_host)
{
	return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
}

static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	bool ret;
	u32 ctl;

	if (cqhci_halted(cq_host))
		return true;

	cqhci_set_irqs(cq_host, CQHCI_IS_HAC);

	ctl = cqhci_readl(cq_host, CQHCI_CTL);
	ctl |= CQHCI_HALT;
	cqhci_writel(cq_host, ctl, CQHCI_CTL);

	wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host),
			   msecs_to_jiffies(timeout) + 1);

	cqhci_set_irqs(cq_host, 0);

	ret = cqhci_halted(cq_host);

	if (!ret)
		pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));

	return ret;
}

/*
 * After halting we expect to be able to use the command line. We interpret the
 * failure to halt to mean the data lines might still be in use (and the upper
 * layers will need to send a STOP command), so we set the timeout based on a
 * generous command timeout.
 */
#define CQHCI_START_HALT_TIMEOUT	5

static void cqhci_recovery_start(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

	WARN_ON(!cq_host->recovery_halt);

	cqhci_halt(mmc, CQHCI_START_HALT_TIMEOUT);

	if (cq_host->ops->disable)
		cq_host->ops->disable(mmc, true);

	mmc->cqe_on = false;
}

static int cqhci_error_from_flags(unsigned int flags)
{
	if (!flags)
		return 0;

	/* CRC errors might indicate re-tuning so prefer to report that */
	if (flags & CQHCI_HOST_CRC)
		return -EILSEQ;

	if (flags & (CQHCI_EXTERNAL_TIMEOUT | CQHCI_HOST_TIMEOUT))
		return -ETIMEDOUT;

	return -EIO;
}

static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag)
{
	struct cqhci_slot *slot = &cq_host->slot[tag];
	struct mmc_request *mrq = slot->mrq;
	struct mmc_data *data;

	if (!mrq)
		return;

	slot->mrq = NULL;

	cq_host->qcnt -= 1;

	data = mrq->data;
	if (data) {
		data->bytes_xfered = 0;
		data->error = cqhci_error_from_flags(slot->flags);
	} else {
		mrq->cmd->error = cqhci_error_from_flags(slot->flags);
	}

	mmc_cqe_request_done(cq_host->mmc, mrq);
}

static void cqhci_recover_mrqs(struct cqhci_host *cq_host)
{
	int i;

	for (i = 0; i < cq_host->num_slots; i++)
		cqhci_recover_mrq(cq_host, i);
}

/*
 * By now the command and data lines should be unused so there is no reason for
 * CQHCI to take a long time to halt, but if it doesn't halt there could be
 * problems clearing tasks, so be generous.
 */
#define CQHCI_FINISH_HALT_TIMEOUT	20

/* CQHCI could be expected to clear its internal state pretty quickly */
#define CQHCI_CLEAR_TIMEOUT		20

static void cqhci_recovery_finish(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	unsigned long flags;
	u32 cqcfg;
	bool ok;

	pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

	WARN_ON(!cq_host->recovery_halt);

	ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);

	if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
		ok = false;

	/*
	 * The specification contradicts itself: it says tasks cannot be
	 * cleared if CQHCI does not halt, that a CQHCI which does not halt
	 * should be disabled and re-enabled, but also that CQHCI must not be
	 * disabled before tasks are cleared. Have a go anyway.
	 */
	if (!ok) {
		pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
		cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
		cqcfg &= ~CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
		cqcfg |= CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
		/* Be sure that there are no tasks */
		ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
		if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
			ok = false;
		WARN_ON(!ok);
	}

	cqhci_recover_mrqs(cq_host);

	WARN_ON(cq_host->qcnt);

	spin_lock_irqsave(&cq_host->lock, flags);
	cq_host->recovery_halt = false;
	mmc->cqe_on = false;
	spin_unlock_irqrestore(&cq_host->lock, flags);

	/* Ensure all writes are done before interrupts are re-enabled */
	wmb();

	cqhci_writel(cq_host, CQHCI_IS_HAC | CQHCI_IS_TCL, CQHCI_IS);

	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

	pr_debug("%s: cqhci: recovery done\n", mmc_hostname(mmc));
}

static const struct mmc_cqe_ops cqhci_cqe_ops = {
	.cqe_enable = cqhci_enable,
	.cqe_disable = cqhci_disable,
	.cqe_request = cqhci_request,
	.cqe_post_req = cqhci_post_req,
	.cqe_off = cqhci_off,
	.cqe_wait_for_idle = cqhci_wait_for_idle,
	.cqe_timeout = cqhci_timeout,
	.cqe_recovery_start = cqhci_recovery_start,
	.cqe_recovery_finish = cqhci_recovery_finish,
};

struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev)
{
	struct cqhci_host *cq_host;
	struct resource *cqhci_memres = NULL;

	/* check and setup CMDQ interface */
	cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						    "cqhci_mem");
	if (!cqhci_memres) {
		dev_dbg(&pdev->dev, "CMDQ not supported\n");
		return ERR_PTR(-EINVAL);
	}

	cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
	if (!cq_host)
		return ERR_PTR(-ENOMEM);
	cq_host->mmio = devm_ioremap(&pdev->dev,
				     cqhci_memres->start,
				     resource_size(cqhci_memres));
	if (!cq_host->mmio) {
		dev_err(&pdev->dev, "failed to remap cqhci regs\n");
		return ERR_PTR(-EBUSY);
	}
	dev_dbg(&pdev->dev, "CMDQ ioremap: done\n");

	return cq_host;
}
EXPORT_SYMBOL(cqhci_pltfm_init);

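/*
 * The VER register encodes the version as decimal digits, e.g. major 1
 * with minor digits 1 and 0 reads back as version "1.10".
 */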
static unsigned int cqhci_ver_major(struct cqhci_host *cq_host)
{
	return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER));
}

static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host)
{
	u32 ver = cqhci_readl(cq_host, CQHCI_VER);

	return CQHCI_VER_MINOR1(ver) * 10 + CQHCI_VER_MINOR2(ver);
}

int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
	       bool dma64)
{
	int err;

	cq_host->dma64 = dma64;
	cq_host->mmc = mmc;
	cq_host->mmc->cqe_private = cq_host;

	cq_host->num_slots = NUM_SLOTS;
	cq_host->dcmd_slot = DCMD_SLOT;

	mmc->cqe_ops = &cqhci_cqe_ops;

	mmc->cqe_qdepth = NUM_SLOTS;
	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
		mmc->cqe_qdepth -= 1;

	cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots,
				     sizeof(*cq_host->slot), GFP_KERNEL);
	if (!cq_host->slot) {
		err = -ENOMEM;
		goto out_err;
	}

	spin_lock_init(&cq_host->lock);

	init_completion(&cq_host->halt_comp);
	init_waitqueue_head(&cq_host->wait_queue);

	pr_info("%s: CQHCI version %u.%02u\n",
		mmc_hostname(mmc), cqhci_ver_major(cq_host),
		cqhci_ver_minor(cq_host));

	return 0;

out_err:
	pr_err("%s: CQHCI version %u.%02u failed to initialize, error %d\n",
	       mmc_hostname(mmc), cqhci_ver_major(cq_host),
	       cqhci_ver_minor(cq_host), err);
	return err;
}
EXPORT_SYMBOL(cqhci_init);

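/*
 * Usage sketch (illustrative, not part of this file): a host controller
 * driver typically obtains and registers a CQHCI host roughly like
 * this, with error handling omitted and "my_cqhci_ops" standing in for
 * the driver's own ops table:
 *
 *	cq_host = cqhci_pltfm_init(pdev);
 *	cq_host->ops = &my_cqhci_ops;
 *	err = cqhci_init(cq_host, mmc, dma64);
 *
 * and forwards CQE interrupts from its own interrupt handler via
 * cqhci_irq(mmc, intmask, cmd_error, data_error).
 */
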
MODULE_AUTHOR("Venkat Gopalakrishnan <venkatg@codeaurora.org>");
MODULE_DESCRIPTION("Command Queue Host Controller Interface driver");
MODULE_LICENSE("GPL v2");