/*
 *  linux/drivers/mmc/core/mmc_ops.c
 *
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "mmc_ops.h"

#define MMC_OPS_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
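
/*
 * The tuning block patterns above are the fixed reference patterns defined
 * by the SD (CMD19) and eMMC HS200 (CMD21) specifications for 4-bit and
 * 8-bit buses. The card returns this known data so the host can sweep its
 * sampling point and pick the most reliable setting.
 */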
int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_send_status);
int mmc_send_status(struct mmc_card *card, u32 *status)
{
	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
}
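
/*
 * Select or deselect a card via CMD7. A NULL @card sends the command with a
 * zero RCA, which deselects all cards on the bus; no response is expected in
 * that case.
 */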
static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SELECT_CARD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}
int mmc_select_card(struct mmc_card *card)
{
	return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}
/*
 * Write the value specified in the device tree or board code into the optional
 * 16 bit Driver Stage Register. This can be used to tune rise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_DSR;

	cmd.arg = (host->dsr << 16) | 0xffff;
	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}
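
/*
 * Illustrative sketch (not part of the original file): on devicetree
 * platforms, host->dsr is typically populated from a "dsr" property parsed
 * during host setup, e.g.:
 *
 *	&mmc0 {
 *		dsr = <0x0404>;
 *	};
 *
 * CMD4 then carries that value in the upper 16 bits of its argument.
 */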
int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode. Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	host->use_spi_crc = 0;

	return err;
}
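
/*
 * Poll CMD1 (SEND_OP_COND) until the card reports that its power-up sequence
 * has finished, trying at most 100 times with a 10 ms delay between attempts.
 * An @ocr of zero is a probe-only pass that does not wait for the card.
 */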
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int i, err = 0;

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* if we're just probing, do a single pass */
		if (ocr == 0)
			break;

		/* otherwise wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;

		mmc_delay(10);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}
int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = MMC_ALL_SEND_CID;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cid, cmd.resp, sizeof(u32) * 4);

	return 0;
}
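
/* Assign the card's relative card address (CMD3, SET_RELATIVE_ADDR). */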
int mmc_set_relative_addr(struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
}
static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}
/*
 * NOTE: the caller must pass a DMA-capable buffer in @buf, or an on-stack
 * buffer (at the cost of some copy overhead in the callee).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}
int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	int ret, i;
	__be32 *csd_tmp;

	if (!mmc_host_is_spi(card->host))
		return mmc_send_cxd_native(card->host, card->rca << 16,
				csd, MMC_SEND_CSD);

	csd_tmp = kzalloc(16, GFP_KERNEL);
	if (!csd_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		csd[i] = be32_to_cpu(csd_tmp[i]);

err:
	kfree(csd_tmp);
	return ret;
}
int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	int ret, i;
	__be32 *cid_tmp;

	if (!mmc_host_is_spi(host)) {
		if (!host->card)
			return -EINVAL;
		return mmc_send_cxd_native(host, host->card->rca << 16,
				cid, MMC_SEND_CID);
	}

	cid_tmp = kzalloc(16, GFP_KERNEL);
	if (!cid_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		cid[i] = be32_to_cpu(cid_tmp[i]);

err:
	kfree(cid_tmp);
	return ret;
}
int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	if (!card || !new_ext_csd)
		return -EINVAL;

	if (!mmc_can_ext_csd(card))
		return -EOPNOTSUPP;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kzalloc(512, GFP_KERNEL);
	if (!ext_csd)
		return -ENOMEM;

	err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
				512);
	if (err)
		kfree(ext_csd);
	else
		*new_ext_csd = ext_csd;

	return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
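
/*
 * Illustrative sketch (not part of the original file): the caller owns the
 * returned 512-byte buffer and must free it, e.g.:
 *
 *	u8 *ext_csd;
 *
 *	err = mmc_get_ext_csd(card, &ext_csd);
 *	if (!err) {
 *		pr_info("EXT_CSD rev %u\n", ext_csd[EXT_CSD_REV]);
 *		kfree(ext_csd);
 *	}
 */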
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}
int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_CRC_ON_OFF;
	cmd.flags = MMC_RSP_SPI_R1;
	cmd.arg = use_crc;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err)
		host->use_spi_crc = use_crc;
	return err;
}
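
/*
 * Translate the status word returned after a CMD6 SWITCH into an error code:
 * check the illegal-command bit for SPI hosts, and the R1 SWITCH_ERROR bit
 * (plus any other unexpected error bits) for native hosts.
 */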
static int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		if (status & 0xFDFFA000)
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}
	return 0;
}
/* Caller must hold re-tuning */
int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
{
	u32 status;
	int err;

	err = mmc_send_status(card, &status);
	if (!crc_err_fatal && err == -EILSEQ)
		return 0;
	if (err)
		return err;

	return mmc_switch_status_error(card->host, status);
}
int mmc_switch_status(struct mmc_card *card)
{
	return __mmc_switch_status(card, true);
}
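
/*
 * Wait for the card to stop signalling busy after a CMD6 SWITCH, preferring
 * the host's ->card_busy() callback and falling back to CMD13 status polling
 * when that is allowed.
 */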
static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
			bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	unsigned long timeout;
	u32 status = 0;
	bool expired = false;
	bool busy = false;

	/* We have an unspecified cmd timeout, use the fallback value. */
	if (!timeout_ms)
		timeout_ms = MMC_OPS_TIMEOUT_MS;

	/*
	 * If polling with CMD13 isn't allowed and the host has no
	 * ->card_busy() callback, simply wait out the stated timeout and
	 * assume that is sufficient.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		return 0;
	}

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);

		if (host->ops->card_busy) {
			busy = host->ops->card_busy(host);
		} else {
			err = mmc_send_status(card, &status);
			if (retry_crc_err && err == -EILSEQ) {
				busy = true;
			} else if (err) {
				return err;
			} else {
				err = mmc_switch_status_error(host, status);
				if (err)
					return err;
				busy = R1_CURRENT_STATE(status) == R1_STATE_PRG;
			}
		}

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}
	} while (busy);

	return 0;
}
/**
 * __mmc_switch - modify EXT_CSD register
 * @card: the MMC card associated with the data transfer
 * @set: cmd set values
 * @index: EXT_CSD register index
 * @value: value to program into EXT_CSD register
 * @timeout_ms: timeout (ms) for operation performed by register write,
 *              timeout of zero implies maximum possible timeout
 * @timing: new timing to change to
 * @use_busy_signal: use the busy signal as response type
 * @send_status: send status cmd to poll for busy
 * @retry_crc_err: retry on CRC errors when polling with CMD13 for busy
 *
 * Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, unsigned char timing,
		bool use_busy_signal, bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp = use_busy_signal;
	unsigned char old_timing = host->ios.timing;

	mmc_retune_hold(host);

	/*
	 * If the cmd timeout and the max_busy_timeout of the host are both
	 * specified, let's validate them. A failure means we need to prevent
	 * the host from doing hw busy detection, which is done by converting
	 * to a R1 response instead of a R1B.
	 */
	if (timeout_ms && host->max_busy_timeout &&
		(timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_CMD_AC;
	if (use_r1b_resp) {
		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
		/*
		 * A busy_timeout of zero means the host can decide to use
		 * whatever value it finds suitable.
		 */
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
	}

	if (index == EXT_CSD_SANITIZE_START)
		cmd.sanitize_busy = true;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		goto out;

	/* No need to check card status in case of unblocking command */
	if (!use_busy_signal)
		goto out;

	/* If SPI or we used HW busy detection above, then we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
		mmc_host_is_spi(host))
		goto out_tim;

	/* Let's try to poll to find out when the command is completed. */
	err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
	if (err)
		goto out;

out_tim:
	/* Switch to new timing before checking switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card);
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, 0,
			true, true, false);
}
EXPORT_SYMBOL_GPL(mmc_switch);
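
/*
 * Illustrative sketch (not part of the original file): a typical
 * mmc_switch() caller, here enabling the volatile cache the way the core
 * init code does:
 *
 *	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CACHE_CTRL,
 *			 1, card->ext_csd.generic_cmd6_time);
 */

/**
 * mmc_send_tuning - send a tuning block request and verify the pattern
 * @host: host to send the tuning command through
 * @opcode: CMD19 (SD) or CMD21 (eMMC HS200) tuning opcode
 * @cmd_error: if non-NULL, set to the tuning command's error code
 *
 * Reads one tuning block for the current bus width and compares it against
 * the reference pattern; returns -EIO on a mismatch.
 */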
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, the tuning process normally takes
	 * fewer than 40 executions of CMD19, and the timeout value should be
	 * shorter than 150 ms.
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);
int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
{
	struct mmc_command cmd = {};

	/*
	 * The eMMC specification states that CMD12 can be used to stop a
	 * tuning command, but the SD specification does not, so do nothing
	 * unless it is eMMC.
	 */
	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
		return 0;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	/*
	 * For drivers that override R1 to R1b, set an arbitrary timeout based
	 * on the tuning timeout, i.e. 150ms.
	 */
	cmd.busy_timeout = 150;

	return mmc_wait_for_cmd(host, &cmd, 0);
}
EXPORT_SYMBOL_GPL(mmc_abort_tuning);
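
/*
 * Bus test (CMD19 BUS_TEST_W / CMD14 BUS_TEST_R): write a known pattern and
 * read back the card's response. On a healthy bus every returned byte is the
 * bitwise complement of the byte written, so test_buf[i] ^ data_buf[i] must
 * equal 0xff.
 */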
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	return err;
}
int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
	 * is a problem. This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}
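
/*
 * Issue the HPI command the card advertises in EXT_CSD (CMD12 or CMD13,
 * with the HPI bit set in the argument) to interrupt an ongoing programming
 * operation.
 */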
static int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {};
	unsigned int opcode;
	int err;

	if (!card->ext_csd.hpi) {
		pr_warn("%s: Card doesn't support HPI command\n",
			mmc_hostname(card->host));
		return -EINVAL;
	}

	opcode = card->ext_csd.hpi_cmd;
	if (opcode == MMC_STOP_TRANSMISSION)
		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	else if (opcode == MMC_SEND_STATUS)
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	cmd.opcode = opcode;
	cmd.arg = card->rca << 16 | 1;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_warn("%s: error %d interrupting operation. "
			"HPI command response %#x\n", mmc_hostname(card->host),
			err, cmd.resp[0]);
		return err;
	}

	*status = cmd.resp[0];

	return 0;
}
/**
 * mmc_interrupt_hpi - Issue a High Priority Interrupt
 * @card: the MMC card associated with the HPI transfer
 *
 * Issue a High Priority Interrupt, then poll the card status until it
 * leaves the programming state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;
	unsigned long prg_wait;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	mmc_claim_host(card->host);
	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card, &status);
	if (err)
		goto out;

	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
	do {
		err = mmc_send_status(card, &status);

		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
			break;
		if (time_after(jiffies, prg_wait))
			err = -ETIMEDOUT;
	} while (!err);

out:
	mmc_release_host(card->host);
	return err;
}
int mmc_can_ext_csd(struct mmc_card *card)
{
	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}
/**
 * mmc_stop_bkops - stop ongoing BKOPS
 * @card: MMC card to check BKOPS
 *
 * Send HPI command to stop ongoing background operations to
 * allow rapid servicing of foreground operations, e.g. read/
 * writes. Wait until the card comes out of the programming state
 * to avoid errors in servicing read/write requests.
 */
int mmc_stop_bkops(struct mmc_card *card)
{
	int err = 0;

	err = mmc_interrupt_hpi(card);

	/*
	 * If err is -EINVAL, an HPI cannot be issued; the card is left to
	 * complete the BKOPS on its own.
	 */
	if (!err || (err == -EINVAL)) {
		mmc_card_clr_doing_bkops(card);
		mmc_retune_release(card->host);
		err = 0;
	}

	return err;
}
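
/* Refresh the cached BKOPS and exception status bytes from EXT_CSD. */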
static int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	mmc_claim_host(card->host);
	err = mmc_get_ext_csd(card, &ext_csd);
	mmc_release_host(card->host);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}
/**
 * mmc_start_bkops - start BKOPS for supported cards
 * @card: MMC card to start BKOPS
 * @from_exception: A flag to indicate if this function was
 *                  called due to an exception raised by the card
 *
 * Start background operations whenever requested.
 * When the urgent BKOPS bit is set in a R1 command response
 * then background operations should be started immediately.
 */
void mmc_start_bkops(struct mmc_card *card, bool from_exception)
{
	int err;
	int timeout;
	bool use_busy_signal;

	if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status)
		return;

	if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
	    from_exception)
		return;

	mmc_claim_host(card->host);
	if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
		timeout = MMC_OPS_TIMEOUT_MS;
		use_busy_signal = true;
	} else {
		timeout = 0;
		use_busy_signal = false;
	}

	mmc_retune_hold(card->host);

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_BKOPS_START, 1, timeout, 0,
			use_busy_signal, true, false);
	if (err) {
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);
		mmc_retune_release(card->host);
		goto out;
	}

	/*
	 * For urgent BKOPS status (LEVEL_2 and higher), BKOPS is executed
	 * synchronously; otherwise the operation continues in the background.
	 */
	if (!use_busy_signal)
		mmc_card_set_doing_bkops(card);
	else
		mmc_retune_release(card->host);
out:
	mmc_release_host(card->host);
}
/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	int err = 0;

	if (mmc_card_mmc(card) &&
			(card->ext_csd.cache_size > 0) &&
			(card->ext_csd.cache_ctrl & 1)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				EXT_CSD_FLUSH_CACHE, 1, 0);
		if (err)
			pr_err("%s: cache flush error %d\n",
					mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);
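
/*
 * Enable or disable the eMMC command queue via the CMDQ_MODE_EN byte of
 * EXT_CSD. This is only attempted when the card advertises command-queue
 * support, and the cached ext_csd.cmdq_en flag is kept in sync on success.
 */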
static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
	int err;

	if (!card->ext_csd.cmdq_support)
		return -EOPNOTSUPP;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
			 val, card->ext_csd.generic_cmd6_time);
	if (!err)
		card->ext_csd.cmdq_en = enable;

	return err;
}

int mmc_cmdq_enable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);

int mmc_cmdq_disable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);