// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver for Realtek PCI-Express card reader
 *
 * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
 *
 * Author:
 *   Wei WANG (wei_wang@realsil.com.cn)
 *   Micky Ching (micky_ching@realsil.com.cn)
 */

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>

#include "rtsx.h"

/***********************************************************************
 * Scatter-gather transfer buffer access routines
 ***********************************************************************/

/*
 * Copy a buffer of length buflen to/from the srb's transfer buffer.
 * (Note: for scatter-gather transfers (srb->use_sg > 0), srb->request_buffer
 * points to a list of s-g entries and we ignore srb->request_bufflen.
 * For non-scatter-gather transfers, srb->request_buffer points to the
 * transfer buffer itself and srb->request_bufflen is the buffer's length.)
 * Update the *index and *offset variables so that the next copy will
 * pick up from where this one left off.
 */

unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer,
				       unsigned int buflen,
				       struct scsi_cmnd *srb,
				       unsigned int *index,
				       unsigned int *offset,
				       enum xfer_buf_dir dir)
{
	unsigned int cnt;

	/* If not using scatter-gather, just transfer the data directly. */
	if (scsi_sg_count(srb) == 0) {
		unsigned char *sgbuffer;

		if (*offset >= scsi_bufflen(srb))
			return 0;
		cnt = min(buflen, scsi_bufflen(srb) - *offset);

		sgbuffer = (unsigned char *)scsi_sglist(srb) + *offset;

		if (dir == TO_XFER_BUF)
			memcpy(sgbuffer, buffer, cnt);
		else
			memcpy(buffer, sgbuffer, cnt);
		*offset += cnt;

	/*
	 * Using scatter-gather. We have to go through the list one entry
	 * at a time. Each s-g entry contains some number of pages which
	 * have to be copied one at a time.
	 */
	} else {
		struct scatterlist *sg =
				(struct scatterlist *)scsi_sglist(srb)
				+ *index;

		/*
		 * This loop handles a single s-g list entry, which may
		 * include multiple pages. Find the initial page structure
		 * and the starting offset within the page, and update
		 * the *offset and *index values for the next loop.
		 */
		cnt = 0;
		while (cnt < buflen && *index < scsi_sg_count(srb)) {
			struct page *page = sg_page(sg) +
					((sg->offset + *offset) >> PAGE_SHIFT);
			unsigned int poff = (sg->offset + *offset) &
					    (PAGE_SIZE - 1);
			unsigned int sglen = sg->length - *offset;

			if (sglen > buflen - cnt) {
				/* Transfer ends within this s-g entry */
				sglen = buflen - cnt;
				*offset += sglen;
			} else {
				/* Transfer continues to next s-g entry */
				*offset = 0;
				++*index;
				++sg;
			}

			while (sglen > 0) {
				unsigned int plen = min(sglen, (unsigned int)
						PAGE_SIZE - poff);

				if (dir == TO_XFER_BUF)
					memcpy_to_page(page, poff,
						       buffer + cnt, plen);
				else
					memcpy_from_page(buffer + cnt, page,
							 poff, plen);

				/* Start at the beginning of the next page */
				poff = 0;
				++page;
				cnt += plen;
				sglen -= plen;
			}
		}
	}

	/* Return the amount actually transferred */
	return cnt;
}

/*
 * Store the contents of buffer into srb's transfer buffer and set the
 * SCSI residue.
 */
void rtsx_stor_set_xfer_buf(unsigned char *buffer,
			    unsigned int buflen, struct scsi_cmnd *srb)
{
	unsigned int index = 0, offset = 0;

	rtsx_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset,
				  TO_XFER_BUF);
	if (buflen < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}

void rtsx_stor_get_xfer_buf(unsigned char *buffer,
			    unsigned int buflen, struct scsi_cmnd *srb)
{
	unsigned int index = 0, offset = 0;

	rtsx_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset,
				  FROM_XFER_BUF);
	if (buflen < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}

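/*
 * Usage sketch (illustrative, not part of the original driver): a SCSI
 * command handler would typically build a small response in a local buffer
 * and hand it back through rtsx_stor_set_xfer_buf(), letting the helper
 * walk the s-g list and set the residue:
 *
 *	unsigned char rsp[36] = { 0 };
 *
 *	rsp[0] = 0x00;		// hypothetical response byte
 *	rtsx_stor_set_xfer_buf(rsp, sizeof(rsp), srb);
 */
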
/***********************************************************************
 * Transport routines
 ***********************************************************************/

/*
 * Invoke the transport and basic error-handling/recovery methods
 *
 * This is used to send the message to the device and receive the response.
 */
void rtsx_invoke_transport(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int result;

	result = rtsx_scsi_handler(srb, chip);

	/*
	 * if the command gets aborted by the higher layers, we need to
	 * short-circuit all other processing.
	 */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
		dev_dbg(rtsx_dev(chip), "-- command was aborted\n");
		srb->result = DID_ABORT << 16;
		goto handle_errors;
	}

	/* if there is a transport error, reset and don't auto-sense */
	if (result == TRANSPORT_ERROR) {
		dev_dbg(rtsx_dev(chip), "-- transport indicates error, resetting\n");
		srb->result = DID_ERROR << 16;
		goto handle_errors;
	}

	srb->result = SAM_STAT_GOOD;

	/*
	 * If we have a failure, we're going to do a REQUEST_SENSE
	 * automatically. Note that we differentiate between a command
	 * "failure" and an "error" in the transport mechanism.
	 */
	if (result == TRANSPORT_FAILED) {
		/* set the result so the higher layers expect this data */
		srb->result = SAM_STAT_CHECK_CONDITION;
		memcpy(srb->sense_buffer,
		       (unsigned char *)&chip->sense_buffer[SCSI_LUN(srb)],
		       sizeof(struct sense_data_t));
	}

	return;

handle_errors:
	return;
}

void rtsx_add_cmd(struct rtsx_chip *chip,
		  u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
{
	__le32 *cb = (__le32 *)(chip->host_cmds_ptr);
	u32 val = 0;

	val |= (u32)(cmd_type & 0x03) << 30;
	val |= (u32)(reg_addr & 0x3FFF) << 16;
	val |= (u32)mask << 8;
	val |= (u32)data;

	spin_lock_irq(&chip->rtsx->reg_lock);
	if (chip->ci < (HOST_CMDS_BUF_LEN / 4))
		cb[(chip->ci)++] = cpu_to_le32(val);

	spin_unlock_irq(&chip->rtsx->reg_lock);
}

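/*
 * Layout sketch (illustrative, derived from the masks above): each queued
 * host command packs into one 32-bit word as
 *
 *	[31:30] cmd_type   [29:16] reg_addr   [15:8] mask   [7:0] data
 *
 * so a register write of value 0x01 with mask 0xFF to address 0x3003 would
 * be encoded as
 *
 *	(WRITE_REG_CMD << 30) | (0x3003 << 16) | (0xFF << 8) | 0x01
 */
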
void rtsx_send_cmd_no_wait(struct rtsx_chip *chip)
{
	u32 val = BIT(31);

	rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);

	val |= (u32)(chip->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_writel(chip, RTSX_HCBCTLR, val);
}

int rtsx_send_cmd(struct rtsx_chip *chip, u8 card, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u32 val = BIT(31);
	long timeleft;
	int err = 0;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;
	rtsx->trans_result = TRANS_NOT_READY;
	init_completion(&trans_done);
	rtsx->trans_state = STATE_TRANS_CMD;

	rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);

	val |= (u32)(chip->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_writel(chip, RTSX_HCBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	timeleft = wait_for_completion_interruptible_timeout(&trans_done,
							     msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
			chip->int_reg);
		err = -ETIMEDOUT;
		goto finish_send_cmd;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

finish_send_cmd:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

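/*
 * Usage sketch (illustrative, not part of the original driver; assumes the
 * rtsx_init_cmd() helper that rewinds chip->ci and the CARD_STOP register
 * definitions from the chip header): callers batch several register
 * accesses and then fire the whole queue with one doorbell write:
 *
 *	rtsx_init_cmd(chip);
 *	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_STOP, SD_STOP, SD_STOP);
 *	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_STOP, SD_CLR_ERR, SD_CLR_ERR);
 *	err = rtsx_send_cmd(chip, SD_CARD, 100);
 */
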
static inline void rtsx_add_sg_tbl(struct rtsx_chip *chip,
				   u32 addr, u32 len, u8 option)
{
	__le64 *sgb = (__le64 *)(chip->host_sg_tbl_ptr);
	u64 val = 0;
	u32 temp_len = 0;
	u8 temp_opt = 0;

	do {
		if (len > 0x80000) {
			temp_len = 0x80000;
			temp_opt = option & (~RTSX_SG_END);
		} else {
			temp_len = len;
			temp_opt = option;
		}

		val = ((u64)addr << 32) | ((u64)temp_len << 12) | temp_opt;

		if (chip->sgi < (HOST_SG_TBL_BUF_LEN / 8))
			sgb[(chip->sgi)++] = cpu_to_le64(val);

		len -= temp_len;
		addr += temp_len;
	} while (len);
}

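/*
 * Split sketch (illustrative, derived from the loop above): a segment
 * larger than the 0x80000-byte per-descriptor limit is spread over several
 * table entries, and only the last one keeps RTSX_SG_END. A 0xA0000-byte
 * segment at addr becomes:
 *
 *	entry 0: addr,           len 0x80000, option & ~RTSX_SG_END
 *	entry 1: addr + 0x80000, len 0x20000, option (RTSX_SG_END preserved)
 */
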
static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
					     struct scatterlist *sg, int num_sg,
					     unsigned int *index,
					     unsigned int *offset, int size,
					     enum dma_data_direction dma_dir,
					     int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u8 dir;
	int sg_cnt, i, resid;
	int err = 0;
	long timeleft;
	struct scatterlist *sg_ptr;
	u32 val = TRIG_DMA;

	if (!sg || num_sg <= 0 || !offset || !index)
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	rtsx->trans_state = STATE_TRANS_SG;
	rtsx->trans_result = TRANS_NOT_READY;

	spin_unlock_irq(&rtsx->reg_lock);

	sg_cnt = dma_map_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	resid = size;
	sg_ptr = sg;
	chip->sgi = 0;
	/*
	 * Usually the next entry will be @sg@ + 1, but if this sg element
	 * is part of a chained scatterlist, it could jump to the start of
	 * a new scatterlist array. So here we use sg_next to move to
	 * the proper sg.
	 */
	for (i = 0; i < *index; i++)
		sg_ptr = sg_next(sg_ptr);
	for (i = *index; i < sg_cnt; i++) {
		dma_addr_t addr;
		unsigned int len;
		u8 option;

		addr = sg_dma_address(sg_ptr);
		len = sg_dma_len(sg_ptr);

		dev_dbg(rtsx_dev(chip), "DMA addr: 0x%x, Len: 0x%x\n",
			(unsigned int)addr, len);
		dev_dbg(rtsx_dev(chip), "*index = %d, *offset = %d\n",
			*index, *offset);

		addr += *offset;

		if ((len - *offset) > resid) {
			/* Transfer ends within this s-g entry */
			*offset += resid;
			len = resid;
			resid = 0;
		} else {
			/* Transfer continues to next s-g entry */
			resid -= (len - *offset);
			len -= *offset;
			*offset = 0;
			*index = *index + 1;
		}
		option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;
		if ((i == sg_cnt - 1) || !resid)
			option |= RTSX_SG_END;

		rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);

		if (!resid)
			break;

		sg_ptr = sg_next(sg_ptr);
	}

	dev_dbg(rtsx_dev(chip), "SG table count = %d\n", chip->sgi);

	val |= (u32)(dir & 0x01) << 29;
	val |= ADMA_MODE;

	spin_lock_irq(&rtsx->reg_lock);

	init_completion(&trans_done);

	rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
	rtsx_writel(chip, RTSX_HDBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	timeleft = wait_for_completion_interruptible_timeout(&trans_done,
							     msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
			__func__, __LINE__);
		dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
			chip->int_reg);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL) {
		err = -EIO;
		spin_unlock_irq(&rtsx->reg_lock);
		goto out;
	}
	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_NOT_READY) {
		init_completion(&trans_done);
		spin_unlock_irq(&rtsx->reg_lock);
		timeleft = wait_for_completion_interruptible_timeout(&trans_done,
								     msecs_to_jiffies(timeout));
		if (timeleft <= 0) {
			dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
				__func__, __LINE__);
			dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
				chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}
	} else {
		spin_unlock_irq(&rtsx->reg_lock);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card,
				     struct scatterlist *sg, int num_sg,
				     enum dma_data_direction dma_dir,
				     int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u8 dir;
	int buf_cnt, i;
	int err = 0;
	long timeleft;
	struct scatterlist *sg_ptr;

	if (!sg || num_sg <= 0)
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	rtsx->trans_state = STATE_TRANS_SG;
	rtsx->trans_result = TRANS_NOT_READY;

	spin_unlock_irq(&rtsx->reg_lock);

	buf_cnt = dma_map_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	sg_ptr = sg;

	for (i = 0; i <= buf_cnt / (HOST_SG_TBL_BUF_LEN / 8); i++) {
		u32 val = TRIG_DMA;
		int sg_cnt, j;

		if (i == buf_cnt / (HOST_SG_TBL_BUF_LEN / 8))
			sg_cnt = buf_cnt % (HOST_SG_TBL_BUF_LEN / 8);
		else
			sg_cnt = HOST_SG_TBL_BUF_LEN / 8;

		chip->sgi = 0;
		for (j = 0; j < sg_cnt; j++) {
			dma_addr_t addr = sg_dma_address(sg_ptr);
			unsigned int len = sg_dma_len(sg_ptr);
			u8 option;

			dev_dbg(rtsx_dev(chip), "DMA addr: 0x%x, Len: 0x%x\n",
				(unsigned int)addr, len);

			option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;
			if (j == (sg_cnt - 1))
				option |= RTSX_SG_END;

			rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);

			sg_ptr = sg_next(sg_ptr);
		}

		dev_dbg(rtsx_dev(chip), "SG table count = %d\n", chip->sgi);

		val |= (u32)(dir & 0x01) << 29;
		val |= ADMA_MODE;

		spin_lock_irq(&rtsx->reg_lock);

		init_completion(&trans_done);

		rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
		rtsx_writel(chip, RTSX_HDBCTLR, val);

		spin_unlock_irq(&rtsx->reg_lock);

		timeleft = wait_for_completion_interruptible_timeout(&trans_done,
								     msecs_to_jiffies(timeout));
		if (timeleft <= 0) {
			dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
				__func__, __LINE__);
			dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
				chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}

		spin_lock_irq(&rtsx->reg_lock);
		if (rtsx->trans_result == TRANS_RESULT_FAIL) {
			err = -EIO;
			spin_unlock_irq(&rtsx->reg_lock);
			goto out;
		}
		spin_unlock_irq(&rtsx->reg_lock);
	}

	/* Wait for TRANS_OK_INT */
	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_NOT_READY) {
		init_completion(&trans_done);
		spin_unlock_irq(&rtsx->reg_lock);
		timeleft = wait_for_completion_interruptible_timeout(&trans_done,
								     msecs_to_jiffies(timeout));
		if (timeleft <= 0) {
			dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
				__func__, __LINE__);
			dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
				chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}
	} else {
		spin_unlock_irq(&rtsx->reg_lock);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

static int rtsx_transfer_buf(struct rtsx_chip *chip, u8 card, void *buf,
			     size_t len, enum dma_data_direction dma_dir,
			     int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	dma_addr_t addr;
	u8 dir;
	int err = 0;
	u32 val = BIT(31);
	long timeleft;

	if (!buf || len <= 0)
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	addr = dma_map_single(&rtsx->pci->dev, buf, len, dma_dir);
	if (dma_mapping_error(&rtsx->pci->dev, addr))
		return -ENOMEM;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	val |= (u32)(dir & 0x01) << 29;
	val |= (u32)(len & 0x00FFFFFF);

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	init_completion(&trans_done);

	rtsx->trans_state = STATE_TRANS_BUF;
	rtsx->trans_result = TRANS_NOT_READY;

	rtsx_writel(chip, RTSX_HDBAR, addr);
	rtsx_writel(chip, RTSX_HDBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	timeleft = wait_for_completion_interruptible_timeout(&trans_done,
							     msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
			__func__, __LINE__);
		dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
			chip->int_reg);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_single(&rtsx->pci->dev, addr, len, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

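/*
 * Usage sketch (illustrative, not part of the original driver): small
 * single-segment payloads can go through the buffer variant; `buf` must be
 * DMA-able (e.g. kmalloc'ed), since it is mapped with dma_map_single():
 *
 *	u8 *buf = kmalloc(512, GFP_KERNEL);
 *
 *	if (buf) {
 *		err = rtsx_transfer_buf(chip, SD_CARD, buf, 512,
 *					DMA_FROM_DEVICE, 100);
 *		kfree(buf);
 *	}
 */
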
int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card,
			       void *buf, size_t len, int use_sg,
			       unsigned int *index, unsigned int *offset,
			       enum dma_data_direction dma_dir, int timeout)
{
	int err = 0;

	/* don't transfer data during abort processing */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
		return -EIO;

	if (use_sg) {
		struct scatterlist *sg = buf;

		err = rtsx_transfer_sglist_adma_partial(chip, card, sg, use_sg,
							index, offset, (int)len,
							dma_dir, timeout);
	} else {
		err = rtsx_transfer_buf(chip, card,
					buf, len, dma_dir, timeout);
	}
	if (err < 0) {
		if (RTSX_TST_DELINK(chip)) {
			RTSX_CLR_DELINK(chip);
			chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
			rtsx_reinit_cards(chip, 1);
		}
	}

	return err;
}

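/*
 * Resume sketch (illustrative, not part of the original driver): because
 * *index and *offset are updated as descriptors are consumed, a caller can
 * drain one SCSI request in fixed-size chunks, with each call resuming
 * exactly where the previous one stopped:
 *
 *	unsigned int idx = 0, off = 0;
 *	int left = total;	// hypothetical remaining byte count
 *
 *	while (left > 0 && !err) {
 *		int chunk = min(left, 0x20000);
 *
 *		err = rtsx_transfer_data_partial(chip, SD_CARD,
 *						 scsi_sglist(srb), chunk,
 *						 scsi_sg_count(srb), &idx,
 *						 &off, DMA_FROM_DEVICE, 100);
 *		left -= chunk;
 *	}
 */
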
int rtsx_transfer_data(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
		       int use_sg, enum dma_data_direction dma_dir, int timeout)
{
	int err = 0;

	dev_dbg(rtsx_dev(chip), "use_sg = %d\n", use_sg);

	/* don't transfer data during abort processing */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
		return -EIO;

	if (use_sg)
		err = rtsx_transfer_sglist_adma(chip, card, buf,
						use_sg, dma_dir, timeout);
	else
		err = rtsx_transfer_buf(chip, card, buf, len, dma_dir, timeout);

	if (err < 0) {
		if (RTSX_TST_DELINK(chip)) {
			RTSX_CLR_DELINK(chip);
			chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
			rtsx_reinit_cards(chip, 1);
		}
	}

	return err;
}
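
/*
 * Usage sketch (illustrative, not part of the original driver): a card
 * handler hands the whole SCSI scatterlist to rtsx_transfer_data() and lets
 * it pick between the ADMA s-g path and the single-buffer path:
 *
 *	err = rtsx_transfer_data(chip, SD_CARD, scsi_sglist(srb),
 *				 scsi_bufflen(srb), scsi_sg_count(srb),
 *				 DMA_FROM_DEVICE, 10000);
 */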