// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Actions Semi Owl SoCs SD/MMC driver
 *
 * Copyright (c) 2014 Actions Semi Inc.
 * Copyright (c) 2019 Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 *
 * TODO: SDIO support
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/reset.h>
#include <linux/spinlock.h>
/*
 * SDC registers
 */
#define OWL_REG_SD_EN			0x0000
#define OWL_REG_SD_CTL			0x0004
#define OWL_REG_SD_STATE		0x0008
#define OWL_REG_SD_CMD			0x000c
#define OWL_REG_SD_ARG			0x0010
#define OWL_REG_SD_RSPBUF0		0x0014
#define OWL_REG_SD_RSPBUF1		0x0018
#define OWL_REG_SD_RSPBUF2		0x001c
#define OWL_REG_SD_RSPBUF3		0x0020
#define OWL_REG_SD_RSPBUF4		0x0024
#define OWL_REG_SD_DAT			0x0028
#define OWL_REG_SD_BLK_SIZE		0x002c
#define OWL_REG_SD_BLK_NUM		0x0030
#define OWL_REG_SD_BUF_SIZE		0x0034
/* SD_EN Bits */
#define OWL_SD_EN_RANE			BIT(31)
#define OWL_SD_EN_RAN_SEED(x)		(((x) & 0x3f) << 24)
#define OWL_SD_EN_S18EN			BIT(12)
#define OWL_SD_EN_RESE			BIT(10)
#define OWL_SD_EN_DAT1_S		BIT(9)
#define OWL_SD_EN_CLK_S			BIT(8)
#define OWL_SD_ENABLE			BIT(7)
#define OWL_SD_EN_BSEL			BIT(6)
#define OWL_SD_EN_SDIOEN		BIT(3)
#define OWL_SD_EN_DDREN			BIT(2)
#define OWL_SD_EN_DATAWID(x)		(((x) & 0x3) << 0)
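/*
 * DATAWID encoding, as used by owl_mmc_set_bus_width() below:
 * 0 selects a 1-bit bus, 1 a 4-bit bus and 2 an 8-bit bus.
 */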
/* SD_CTL Bits */
#define OWL_SD_CTL_TOUTEN		BIT(31)
#define OWL_SD_CTL_TOUTCNT(x)		(((x) & 0x7f) << 24)
#define OWL_SD_CTL_DELAY_MSK		GENMASK(23, 16)
#define OWL_SD_CTL_RDELAY(x)		(((x) & 0xf) << 20)
#define OWL_SD_CTL_WDELAY(x)		(((x) & 0xf) << 16)
#define OWL_SD_CTL_CMDLEN		BIT(13)
#define OWL_SD_CTL_SCC			BIT(12)
#define OWL_SD_CTL_TCN(x)		(((x) & 0xf) << 8)
#define OWL_SD_CTL_TS			BIT(7)
#define OWL_SD_CTL_LBE			BIT(6)
#define OWL_SD_CTL_C7EN			BIT(5)
#define OWL_SD_CTL_TM(x)		(((x) & 0xf) << 0)
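/*
 * Transfer modes programmed via OWL_SD_CTL_TM(x), as used by this driver:
 * 0 - command without response, 1 - short (48-bit) response,
 * 2 - long (136-bit) response, 3 - response with busy signalling (R1b),
 * 4 - data read, 5 - data write, 8 - clock-only transfer (used by
 * owl_mmc_power_on() to send the init clock).
 */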
#define OWL_SD_DELAY_LOW_CLK		0x0f
#define OWL_SD_DELAY_MID_CLK		0x0a
#define OWL_SD_DELAY_HIGH_CLK		0x09
#define OWL_SD_RDELAY_DDR50		0x0a
#define OWL_SD_WDELAY_DDR50		0x08
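/*
 * Read/write delay-chain taps for the three clock bands handled in
 * owl_mmc_set_clk_rate() (<= 1 MHz, <= 26 MHz, <= 52 MHz); DDR50 mode
 * needs its own pair of read/write taps.
 */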
/* SD_STATE Bits */
#define OWL_SD_STATE_DAT1BS		BIT(18)
#define OWL_SD_STATE_SDIOB_P		BIT(17)
#define OWL_SD_STATE_SDIOB_EN		BIT(16)
#define OWL_SD_STATE_TOUTE		BIT(15)
#define OWL_SD_STATE_BAEP		BIT(14)
#define OWL_SD_STATE_MEMRDY		BIT(12)
#define OWL_SD_STATE_CMDS		BIT(11)
#define OWL_SD_STATE_DAT1AS		BIT(10)
#define OWL_SD_STATE_SDIOA_P		BIT(9)
#define OWL_SD_STATE_SDIOA_EN		BIT(8)
#define OWL_SD_STATE_DAT0S		BIT(7)
#define OWL_SD_STATE_TEIE		BIT(6)
#define OWL_SD_STATE_TEI		BIT(5)
#define OWL_SD_STATE_CLNR		BIT(4)
#define OWL_SD_STATE_CLC		BIT(3)
#define OWL_SD_STATE_WC16ER		BIT(2)
#define OWL_SD_STATE_RC16ER		BIT(1)
#define OWL_SD_STATE_CRC7ER		BIT(0)

#define OWL_CMD_TIMEOUT_MS		30000
struct owl_mmc_host {
	struct device *dev;
	struct reset_control *reset;
	void __iomem *base;
	struct clk *clk;
	struct completion sdc_complete;
	spinlock_t lock;
	int irq;
	u32 clock;
	bool ddr_50;

	enum dma_data_direction dma_dir;
	struct dma_chan *dma;
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config dma_cfg;
	struct completion dma_complete;

	struct mmc_host *mmc;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_data *data;
};
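/* Set (state == true) or clear (state == false) the 'val' bits in 'reg' */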
static void owl_mmc_update_reg(void __iomem *reg, unsigned int val, bool state)
{
	unsigned int regval;

	regval = readl(reg);

	if (state)
		regval |= val;
	else
		regval &= ~val;

	writel(regval, reg);
}
static irqreturn_t owl_irq_handler(int irq, void *devid)
{
	struct owl_mmc_host *owl_host = devid;
	unsigned long flags;
	u32 state;

	spin_lock_irqsave(&owl_host->lock, flags);

	state = readl(owl_host->base + OWL_REG_SD_STATE);
	if (state & OWL_SD_STATE_TEI) {
		/* Write the TEI bit back to acknowledge the interrupt */
		state = readl(owl_host->base + OWL_REG_SD_STATE);
		state |= OWL_SD_STATE_TEI;
		writel(state, owl_host->base + OWL_REG_SD_STATE);
		complete(&owl_host->sdc_complete);
	}

	spin_unlock_irqrestore(&owl_host->lock, flags);

	return IRQ_HANDLED;
}
static void owl_mmc_finish_request(struct owl_mmc_host *owl_host)
{
	struct mmc_request *mrq = owl_host->mrq;
	struct mmc_data *data = mrq->data;

	/* Should never be NULL */
	WARN_ON(!mrq);

	owl_host->mrq = NULL;

	if (data)
		dma_unmap_sg(owl_host->dma->device->dev, data->sg, data->sg_len,
			     owl_host->dma_dir);

	/* Finally finish request */
	mmc_request_done(owl_host->mmc, mrq);
}
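/*
 * Issue a command on the bus. The response type selects the hardware
 * transfer mode (TM) and the error bits to check in SD_STATE. Commands
 * without data are waited on synchronously here via the transfer-end
 * interrupt; data commands return early and are awaited in
 * owl_mmc_request().
 */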
static void owl_mmc_send_cmd(struct owl_mmc_host *owl_host,
			     struct mmc_command *cmd,
			     struct mmc_data *data)
{
	unsigned long timeout;
	u32 mode, state, resp[2];
	u32 cmd_rsp_mask = 0;

	init_completion(&owl_host->sdc_complete);

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		mode = OWL_SD_CTL_TM(0);
		break;

	case MMC_RSP_R1:
		if (data) {
			if (data->flags & MMC_DATA_READ)
				mode = OWL_SD_CTL_TM(4);
			else
				mode = OWL_SD_CTL_TM(5);
		} else {
			mode = OWL_SD_CTL_TM(1);
		}
		cmd_rsp_mask = OWL_SD_STATE_CLNR | OWL_SD_STATE_CRC7ER;
		break;

	case MMC_RSP_R1B:
		mode = OWL_SD_CTL_TM(3);
		cmd_rsp_mask = OWL_SD_STATE_CLNR | OWL_SD_STATE_CRC7ER;
		break;

	case MMC_RSP_R2:
		mode = OWL_SD_CTL_TM(2);
		cmd_rsp_mask = OWL_SD_STATE_CLNR | OWL_SD_STATE_CRC7ER;
		break;

	case MMC_RSP_R3:
		mode = OWL_SD_CTL_TM(1);
		cmd_rsp_mask = OWL_SD_STATE_CLNR;
		break;

	default:
		dev_warn(owl_host->dev, "Unknown MMC command\n");
		cmd->error = -EINVAL;
		return;
	}

	/* Keep current WDELAY and RDELAY */
	mode |= (readl(owl_host->base + OWL_REG_SD_CTL) & (0xff << 16));

	/* Start to send corresponding command type */
	writel(cmd->arg, owl_host->base + OWL_REG_SD_ARG);
	writel(cmd->opcode, owl_host->base + OWL_REG_SD_CMD);

	/* Set LBE to send clk at the end of last read block */
	if (data) {
		mode |= (OWL_SD_CTL_TS | OWL_SD_CTL_LBE | 0x64000000);
	} else {
		mode &= ~(OWL_SD_CTL_TOUTEN | OWL_SD_CTL_LBE);
		mode |= OWL_SD_CTL_TS;
	}

	owl_host->cmd = cmd;

	/* Start transfer */
	writel(mode, owl_host->base + OWL_REG_SD_CTL);

	if (data)
		return;

	timeout = msecs_to_jiffies(cmd->busy_timeout ? cmd->busy_timeout :
				   OWL_CMD_TIMEOUT_MS);

	if (!wait_for_completion_timeout(&owl_host->sdc_complete, timeout)) {
		dev_err(owl_host->dev, "CMD interrupt timeout\n");
		cmd->error = -ETIMEDOUT;
		return;
	}

	state = readl(owl_host->base + OWL_REG_SD_STATE);
	if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
		if (cmd_rsp_mask & state) {
			if (state & OWL_SD_STATE_CLNR) {
				dev_err(owl_host->dev, "Error CMD_NO_RSP\n");
				cmd->error = -EILSEQ;
				return;
			}

			if (state & OWL_SD_STATE_CRC7ER) {
				dev_err(owl_host->dev, "Error CMD_RSP_CRC\n");
				cmd->error = -EILSEQ;
				return;
			}
		}

		if (mmc_resp_type(cmd) & MMC_RSP_136) {
			cmd->resp[3] = readl(owl_host->base + OWL_REG_SD_RSPBUF0);
			cmd->resp[2] = readl(owl_host->base + OWL_REG_SD_RSPBUF1);
			cmd->resp[1] = readl(owl_host->base + OWL_REG_SD_RSPBUF2);
			cmd->resp[0] = readl(owl_host->base + OWL_REG_SD_RSPBUF3);
		} else {
			/* Reassemble the 48-bit response from the two buffers */
			resp[0] = readl(owl_host->base + OWL_REG_SD_RSPBUF0);
			resp[1] = readl(owl_host->base + OWL_REG_SD_RSPBUF1);
			cmd->resp[0] = resp[1] << 24 | resp[0] >> 8;
			cmd->resp[1] = resp[1] >> 8;
		}
	}
}
static void owl_mmc_dma_complete(void *param)
{
	struct owl_mmc_host *owl_host = param;
	struct mmc_data *data = owl_host->data;

	if (data)
		complete(&owl_host->dma_complete);
}
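/*
 * Program the block geometry into the controller and set up the external
 * DMA channel: map the scatterlist and prepare a slave_sg descriptor whose
 * completion callback signals dma_complete.
 */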
static int owl_mmc_prepare_data(struct owl_mmc_host *owl_host,
				struct mmc_data *data)
{
	u32 total;

	owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN, OWL_SD_EN_BSEL,
			   true);
	writel(data->blocks, owl_host->base + OWL_REG_SD_BLK_NUM);
	writel(data->blksz, owl_host->base + OWL_REG_SD_BLK_SIZE);
	total = data->blksz * data->blocks;

	if (total < 512)
		writel(total, owl_host->base + OWL_REG_SD_BUF_SIZE);
	else
		writel(512, owl_host->base + OWL_REG_SD_BUF_SIZE);

	if (data->flags & MMC_DATA_WRITE) {
		owl_host->dma_dir = DMA_TO_DEVICE;
		owl_host->dma_cfg.direction = DMA_MEM_TO_DEV;
	} else {
		owl_host->dma_dir = DMA_FROM_DEVICE;
		owl_host->dma_cfg.direction = DMA_DEV_TO_MEM;
	}

	dma_map_sg(owl_host->dma->device->dev, data->sg,
		   data->sg_len, owl_host->dma_dir);

	dmaengine_slave_config(owl_host->dma, &owl_host->dma_cfg);
	owl_host->desc = dmaengine_prep_slave_sg(owl_host->dma, data->sg,
						 data->sg_len,
						 owl_host->dma_cfg.direction,
						 DMA_PREP_INTERRUPT |
						 DMA_CTRL_ACK);
	if (!owl_host->desc) {
		dev_err(owl_host->dev, "Can't prepare slave sg\n");
		return -EBUSY;
	}

	owl_host->data = data;

	owl_host->desc->callback = owl_mmc_dma_complete;
	owl_host->desc->callback_param = (void *)owl_host;
	data->error = 0;

	return 0;
}
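/*
 * Request flow: for data requests the DMA descriptor is queued first,
 * then the command is fired; completion requires both the controller's
 * transfer-end interrupt and the DMA callback before an optional stop
 * command is sent.
 */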
static void owl_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct owl_mmc_host *owl_host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	int ret;

	owl_host->mrq = mrq;
	if (mrq->data) {
		ret = owl_mmc_prepare_data(owl_host, data);
		if (ret < 0) {
			data->error = ret;
			goto err_out;
		}

		init_completion(&owl_host->dma_complete);
		dmaengine_submit(owl_host->desc);
		dma_async_issue_pending(owl_host->dma);
	}

	owl_mmc_send_cmd(owl_host, mrq->cmd, data);

	if (data) {
		if (!wait_for_completion_timeout(&owl_host->sdc_complete,
						 30 * HZ)) {
			dev_err(owl_host->dev, "CMD interrupt timeout\n");
			mrq->cmd->error = -ETIMEDOUT;
			dmaengine_terminate_all(owl_host->dma);
			goto err_out;
		}

		if (!wait_for_completion_timeout(&owl_host->dma_complete,
						 5 * HZ)) {
			dev_err(owl_host->dev, "DMA interrupt timeout\n");
			mrq->cmd->error = -ETIMEDOUT;
			dmaengine_terminate_all(owl_host->dma);
			goto err_out;
		}

		if (data->stop)
			owl_mmc_send_cmd(owl_host, data->stop, NULL);

		data->bytes_xfered = data->blocks * data->blksz;
	}

err_out:
	owl_mmc_finish_request(owl_host);
}
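/*
 * Pick the delay-chain taps for the requested clock band, then set the
 * module clock. Note the clock is requested at twice the bus rate
 * (rate << 1); presumably the controller divides it by two internally.
 */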
static int owl_mmc_set_clk_rate(struct owl_mmc_host *owl_host,
				unsigned int rate)
{
	unsigned long clk_rate;
	int ret = 0;
	u32 reg;

	reg = readl(owl_host->base + OWL_REG_SD_CTL);
	reg &= ~OWL_SD_CTL_DELAY_MSK;

	/* Set RDELAY and WDELAY based on the clock */
	if (rate <= 1000000) {
		writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_DELAY_LOW_CLK) |
		       OWL_SD_CTL_WDELAY(OWL_SD_DELAY_LOW_CLK),
		       owl_host->base + OWL_REG_SD_CTL);
	} else if ((rate > 1000000) && (rate <= 26000000)) {
		writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_DELAY_MID_CLK) |
		       OWL_SD_CTL_WDELAY(OWL_SD_DELAY_MID_CLK),
		       owl_host->base + OWL_REG_SD_CTL);
	} else if ((rate > 26000000) && (rate <= 52000000) &&
		   !owl_host->ddr_50) {
		writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_DELAY_HIGH_CLK) |
		       OWL_SD_CTL_WDELAY(OWL_SD_DELAY_HIGH_CLK),
		       owl_host->base + OWL_REG_SD_CTL);
	/* DDR50 mode has special delay chain */
	} else if ((rate > 26000000) && (rate <= 52000000) &&
		   owl_host->ddr_50) {
		writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_RDELAY_DDR50) |
		       OWL_SD_CTL_WDELAY(OWL_SD_WDELAY_DDR50),
		       owl_host->base + OWL_REG_SD_CTL);
	} else {
		dev_err(owl_host->dev, "SD clock rate not supported\n");
		return -EINVAL;
	}

	clk_rate = clk_round_rate(owl_host->clk, rate << 1);
	ret = clk_set_rate(owl_host->clk, clk_rate);

	return ret;
}
static void owl_mmc_set_clk(struct owl_mmc_host *owl_host, struct mmc_ios *ios)
{
	if (!ios->clock)
		return;

	owl_host->clock = ios->clock;
	owl_mmc_set_clk_rate(owl_host, ios->clock);
}
static void owl_mmc_set_bus_width(struct owl_mmc_host *owl_host,
				  struct mmc_ios *ios)
{
	u32 reg;

	reg = readl(owl_host->base + OWL_REG_SD_EN);
	reg &= ~0x03;
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		break;
	case MMC_BUS_WIDTH_4:
		reg |= OWL_SD_EN_DATAWID(1);
		break;
	case MMC_BUS_WIDTH_8:
		reg |= OWL_SD_EN_DATAWID(2);
		break;
	}

	writel(reg, owl_host->base + OWL_REG_SD_EN);
}
static void owl_mmc_ctr_reset(struct owl_mmc_host *owl_host)
{
	reset_control_assert(owl_host->reset);
	udelay(20);
	reset_control_deassert(owl_host->reset);
}
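/*
 * SD/MMC cards need at least 74 clock cycles after power-up before the
 * first command; the clock-only transfer mode (TM(8)) programmed below
 * provides that initialization clock.
 */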
static void owl_mmc_power_on(struct owl_mmc_host *owl_host)
{
	u32 mode;

	init_completion(&owl_host->sdc_complete);

	/* Enable transfer end IRQ */
	owl_mmc_update_reg(owl_host->base + OWL_REG_SD_STATE,
			   OWL_SD_STATE_TEIE, true);

	/* Send init clk */
	mode = (readl(owl_host->base + OWL_REG_SD_CTL) & (0xff << 16));
	mode |= OWL_SD_CTL_TS | OWL_SD_CTL_TCN(5) | OWL_SD_CTL_TM(8);
	writel(mode, owl_host->base + OWL_REG_SD_CTL);

	if (!wait_for_completion_timeout(&owl_host->sdc_complete, HZ)) {
		dev_err(owl_host->dev, "CMD interrupt timeout\n");
		return;
	}
}
static void owl_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct owl_mmc_host *owl_host = mmc_priv(mmc);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		dev_dbg(owl_host->dev, "Powering card up\n");

		/* Reset the SDC controller to clear all previous states */
		owl_mmc_ctr_reset(owl_host);
		clk_prepare_enable(owl_host->clk);
		writel(OWL_SD_ENABLE | OWL_SD_EN_RESE,
		       owl_host->base + OWL_REG_SD_EN);
		break;

	case MMC_POWER_ON:
		dev_dbg(owl_host->dev, "Powering card on\n");
		owl_mmc_power_on(owl_host);
		break;

	case MMC_POWER_OFF:
		dev_dbg(owl_host->dev, "Powering card off\n");
		clk_disable_unprepare(owl_host->clk);
		return;

	default:
		dev_dbg(owl_host->dev, "Ignoring unknown card power state\n");
		break;
	}

	if (ios->clock != owl_host->clock)
		owl_mmc_set_clk(owl_host, ios);

	owl_mmc_set_bus_width(owl_host, ios);

	/* Enable DDR mode if requested */
	if (ios->timing == MMC_TIMING_UHS_DDR50) {
		owl_host->ddr_50 = 1;
		owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN,
				   OWL_SD_EN_DDREN, true);
	} else {
		owl_host->ddr_50 = 0;
	}
}
static int owl_mmc_start_signal_voltage_switch(struct mmc_host *mmc,
					       struct mmc_ios *ios)
{
	struct owl_mmc_host *owl_host = mmc_priv(mmc);

	/* It is enough to change the pad ctrl bit for voltage switch */
	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN,
				   OWL_SD_EN_S18EN, false);
		break;
	case MMC_SIGNAL_VOLTAGE_180:
		owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN,
				   OWL_SD_EN_S18EN, true);
		break;
	default:
		return -ENOTSUPP;
	}

	return 0;
}
static const struct mmc_host_ops owl_mmc_ops = {
	.request	 = owl_mmc_request,
	.set_ios	 = owl_mmc_set_ios,
	.get_ro		 = mmc_gpio_get_ro,
	.get_cd		 = mmc_gpio_get_cd,
	.start_signal_voltage_switch = owl_mmc_start_signal_voltage_switch,
};
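/*
 * Note: .get_ro/.get_cd above are the generic slot-gpio helpers; the
 * write-protect and card-detect GPIOs they read are parsed from the
 * device tree by mmc_of_parse() during probe.
 */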
static int owl_mmc_probe(struct platform_device *pdev)
{
	struct owl_mmc_host *owl_host;
	struct mmc_host *mmc;
	struct resource *res;
	int ret;

	mmc = mmc_alloc_host(sizeof(struct owl_mmc_host), &pdev->dev);
	if (!mmc) {
		dev_err(&pdev->dev, "mmc alloc host failed\n");
		return -ENOMEM;
	}
	platform_set_drvdata(pdev, mmc);

	owl_host = mmc_priv(mmc);
	owl_host->dev = &pdev->dev;
	owl_host->mmc = mmc;
	spin_lock_init(&owl_host->lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	owl_host->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(owl_host->base)) {
		dev_err(&pdev->dev, "Failed to remap registers\n");
		ret = PTR_ERR(owl_host->base);
		goto err_free_host;
	}

	owl_host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(owl_host->clk)) {
		dev_err(&pdev->dev, "No clock defined\n");
		ret = PTR_ERR(owl_host->clk);
		goto err_free_host;
	}

	owl_host->reset = devm_reset_control_get_exclusive(&pdev->dev, NULL);
	if (IS_ERR(owl_host->reset)) {
		dev_err(&pdev->dev, "Could not get reset control\n");
		ret = PTR_ERR(owl_host->reset);
		goto err_free_host;
	}

	mmc->ops		= &owl_mmc_ops;
	mmc->max_blk_count	= 512;
	mmc->max_blk_size	= 512;
	mmc->max_segs		= 256;
	mmc->max_seg_size	= 262144;
	mmc->max_req_size	= 262144;
	/* 100kHz ~ 52MHz */
	mmc->f_min		= 100000;
	mmc->f_max		= 52000000;
	mmc->caps	       |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
				  MMC_CAP_4_BIT_DATA;
	mmc->caps2		= (MMC_CAP2_BOOTPART_NOACC | MMC_CAP2_NO_SDIO);
	mmc->ocr_avail		= MMC_VDD_32_33 | MMC_VDD_33_34 |
				  MMC_VDD_165_195;

	ret = mmc_of_parse(mmc);
	if (ret)
		goto err_free_host;

	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
	owl_host->dma = dma_request_chan(&pdev->dev, "mmc");
	if (IS_ERR(owl_host->dma)) {
		dev_err(owl_host->dev, "Failed to get external DMA channel.\n");
		ret = PTR_ERR(owl_host->dma);
		goto err_free_host;
	}

	dev_info(&pdev->dev, "Using %s for DMA transfers\n",
		 dma_chan_name(owl_host->dma));

	owl_host->dma_cfg.src_addr = res->start + OWL_REG_SD_DAT;
	owl_host->dma_cfg.dst_addr = res->start + OWL_REG_SD_DAT;
	owl_host->dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	owl_host->dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	owl_host->dma_cfg.device_fc = false;

	owl_host->irq = platform_get_irq(pdev, 0);
	if (owl_host->irq < 0) {
		ret = -EINVAL;
		goto err_release_channel;
	}

	ret = devm_request_irq(&pdev->dev, owl_host->irq, owl_irq_handler,
			       0, dev_name(&pdev->dev), owl_host);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request irq %d\n",
			owl_host->irq);
		goto err_release_channel;
	}

	ret = mmc_add_host(mmc);
	if (ret) {
		dev_err(&pdev->dev, "Failed to add host\n");
		goto err_release_channel;
	}

	dev_dbg(&pdev->dev, "Owl MMC Controller Initialized\n");

	return 0;

err_release_channel:
	dma_release_channel(owl_host->dma);
err_free_host:
	mmc_free_host(mmc);

	return ret;
}
static int owl_mmc_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct owl_mmc_host *owl_host = mmc_priv(mmc);

	mmc_remove_host(mmc);
	disable_irq(owl_host->irq);
	dma_release_channel(owl_host->dma);
	mmc_free_host(mmc);

	return 0;
}
static const struct of_device_id owl_mmc_of_match[] = {
	{.compatible = "actions,owl-mmc",},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, owl_mmc_of_match);
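/*
 * Illustrative device tree node for this driver. The values below are
 * made up; only the compatible string and the "mmc" dma-names entry are
 * fixed by the code above, the rest depends on the SoC:
 *
 *	mmc0: mmc@e0330000 {
 *		compatible = "actions,owl-mmc";
 *		reg = <0xe0330000 0x4000>;
 *		interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
 *		clocks = <&cmu CLK_SD0>;
 *		resets = <&cmu RESET_SD0>;
 *		dmas = <&dma 2>;
 *		dma-names = "mmc";
 *		bus-width = <4>;
 *	};
 */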
static struct platform_driver owl_mmc_driver = {
	.driver = {
		.name	= "owl_mmc",
		.of_match_table = owl_mmc_of_match,
	},
	.probe		= owl_mmc_probe,
	.remove		= owl_mmc_remove,
};
module_platform_driver(owl_mmc_driver);
MODULE_DESCRIPTION("Actions Semi Owl SoCs SD/MMC Driver");
MODULE_AUTHOR("Actions Semi");
MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
MODULE_LICENSE("GPL");