1 // SPDX-License-Identifier: GPL-2.0-or-later
3 /* Copyright 2007-2008 Pierre Ossman */
6 #include <linux/mmc/core.h>
7 #include <linux/mmc/card.h>
8 #include <linux/mmc/host.h>
9 #include <linux/mmc/mmc.h>
10 #include <linux/slab.h>
12 #include <linux/scatterlist.h>
13 #include <linux/swap.h> /* For nr_free_buffer_pages() */
14 #include <linux/list.h>
16 #include <linux/debugfs.h>
17 #include <linux/uaccess.h>
18 #include <linux/seq_file.h>
19 #include <linux/module.h>
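/*
 * Result codes: RESULT_UNSUP_HOST and RESULT_UNSUP_CARD flag tests that were
 * skipped because the host controller or the card lacks the feature under
 * test, as opposed to tests that actually failed.
 */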
29 #define RESULT_UNSUP_HOST 2
30 #define RESULT_UNSUP_CARD 3
32 #define BUFFER_ORDER 2
33 #define BUFFER_SIZE (PAGE_SIZE << BUFFER_ORDER)
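/*
 * With BUFFER_ORDER 2 each transfer buffer spans four pages, i.e. 16KiB
 * (32 512-byte sectors) on systems with 4KiB pages.
 */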
35 #define TEST_ALIGN_END 8
38 * Limit the test area size to the maximum MMC HC erase group size. Note that
39 * the maximum SD allocation unit size is just 4MiB.
41 #define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
44 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
45 * @page: first page in the allocation
46 * @order: order of the number of pages allocated
48 struct mmc_test_pages {
54 * struct mmc_test_mem - allocated memory.
55 * @arr: array of allocations
56 * @cnt: number of allocations
59 struct mmc_test_pages *arr;
64 * struct mmc_test_area - information for performance tests.
65 * @max_sz: test area size (in bytes)
66 * @dev_addr: address on card at which to do performance tests
67 * @max_tfr: maximum transfer size allowed by driver (in bytes)
68 * @max_segs: maximum segments allowed by driver in scatterlist @sg
69 * @max_seg_sz: maximum segment size allowed by driver
70 * @blocks: number of (512 byte) blocks currently mapped by @sg
71 * @sg_len: length of currently mapped scatterlist @sg
72 * @mem: allocated memory
74 * @sg_areq: scatterlist for non-blocking request
76 struct mmc_test_area {
78 unsigned int dev_addr;
80 unsigned int max_segs;
81 unsigned int max_seg_sz;
84 struct mmc_test_mem *mem;
85 struct scatterlist *sg;
86 struct scatterlist *sg_areq;
90 * struct mmc_test_transfer_result - transfer results for performance tests.
91 * @link: double-linked list
92 * @count: number of groups of sectors to check
93 * @sectors: number of sectors to check in one group
94 * @ts: time values of transfer
95 * @rate: calculated transfer rate
96 * @iops: I/O operations per second (times 100)
98 struct mmc_test_transfer_result {
99 struct list_head link;
101 unsigned int sectors;
102 struct timespec64 ts;
108 * struct mmc_test_general_result - results for tests.
109 * @link: double-linked list
110 * @card: card under test
111 * @testcase: number of test case
112 * @result: result of test run
113 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
115 struct mmc_test_general_result {
116 struct list_head link;
117 struct mmc_card *card;
120 struct list_head tr_lst;
124 * struct mmc_test_dbgfs_file - debugfs related file.
125 * @link: double-linked list
126 * @card: card under test
127 * @file: file created under debugfs
129 struct mmc_test_dbgfs_file {
130 struct list_head link;
131 struct mmc_card *card;
136 * struct mmc_test_card - test information.
137 * @card: card under test
138 * @scratch: transfer buffer
139 * @buffer: transfer buffer
140 * @highmem: buffer for highmem tests
141 * @area: information for performance tests
142 * @gr: pointer to results of current testcase
144 struct mmc_test_card {
145 struct mmc_card *card;
147 u8 scratch[BUFFER_SIZE];
149 #ifdef CONFIG_HIGHMEM
150 struct page *highmem;
152 struct mmc_test_area area;
153 struct mmc_test_general_result *gr;
156 enum mmc_test_prep_media {
157 MMC_TEST_PREP_NONE = 0,
158 MMC_TEST_PREP_WRITE_FULL = 1 << 0,
159 MMC_TEST_PREP_ERASE = 1 << 1,
162 struct mmc_test_multiple_rw {
163 unsigned int *sg_len;
168 bool do_nonblock_req;
169 enum mmc_test_prep_media prepare;
172 /*******************************************************************/
173 /* General helper functions */
174 /*******************************************************************/
177 * Configure correct block size in card
179 static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
181 return mmc_set_blocklen(test->card, size);
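/*
 * CMD23 (SET_BLOCK_COUNT) is taken as supported by all (e)MMC devices; for SD
 * it is only used when the SCR command-support bits advertise it.
 */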
184 static bool mmc_test_card_cmd23(struct mmc_card *card)
186 return mmc_card_mmc(card) ||
187 (mmc_card_sd(card) && card->scr.cmds & SD_SCR_CMD23_SUPPORT);
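/*
 * Only program SET_BLOCK_COUNT when the request supplies an sbc command, both
 * host and card support CMD23, the opcode is a multi-block transfer, and the
 * card has no CMD23 quirk; otherwise fall back to an open-ended transfer
 * terminated by CMD12 (STOP_TRANSMISSION).
 */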
190 static void mmc_test_prepare_sbc(struct mmc_test_card *test,
191 struct mmc_request *mrq, unsigned int blocks)
193 struct mmc_card *card = test->card;
195 if (!mrq->sbc || !mmc_host_cmd23(card->host) ||
196 !mmc_test_card_cmd23(card) || !mmc_op_multi(mrq->cmd->opcode) ||
197 (card->quirks & MMC_QUIRK_BLK_NO_CMD23)) {
202 mrq->sbc->opcode = MMC_SET_BLOCK_COUNT;
203 mrq->sbc->arg = blocks;
204 mrq->sbc->flags = MMC_RSP_R1 | MMC_CMD_AC;
208 * Fill in the mmc_request structure given a set of transfer parameters.
210 static void mmc_test_prepare_mrq(struct mmc_test_card *test,
211 struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
212 unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
214 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop))
218 mrq->cmd->opcode = write ?
219 MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
221 mrq->cmd->opcode = write ?
222 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
225 mrq->cmd->arg = dev_addr;
226 if (!mmc_card_blockaddr(test->card))
229 mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
234 mrq->stop->opcode = MMC_STOP_TRANSMISSION;
236 mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
239 mrq->data->blksz = blksz;
240 mrq->data->blocks = blocks;
241 mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
243 mrq->data->sg_len = sg_len;
245 mmc_test_prepare_sbc(test, mrq, blocks);
247 mmc_set_data_timeout(mrq->data, test->card);
250 static int mmc_test_busy(struct mmc_command *cmd)
252 return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
253 (R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
257 * Wait for the card to finish the busy state
259 static int mmc_test_wait_busy(struct mmc_test_card *test)
262 struct mmc_command cmd = {};
266 memset(&cmd, 0, sizeof(struct mmc_command));
268 cmd.opcode = MMC_SEND_STATUS;
269 cmd.arg = test->card->rca << 16;
270 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
272 ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
276 if (!busy && mmc_test_busy(&cmd)) {
278 if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
279 pr_info("%s: Warning: Host did not wait for busy state to end.\n",
280 mmc_hostname(test->card->host));
282 } while (mmc_test_busy(&cmd));
288 * Transfer a single sector of kernel addressable data
290 static int mmc_test_buffer_transfer(struct mmc_test_card *test,
291 u8 *buffer, unsigned addr, unsigned blksz, int write)
293 struct mmc_request mrq = {};
294 struct mmc_command cmd = {};
295 struct mmc_command stop = {};
296 struct mmc_data data = {};
298 struct scatterlist sg;
304 sg_init_one(&sg, buffer, blksz);
306 mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);
308 mmc_wait_for_req(test->card->host, &mrq);
315 return mmc_test_wait_busy(test);
318 static void mmc_test_free_mem(struct mmc_test_mem *mem)
323 __free_pages(mem->arr[mem->cnt].page,
324 mem->arr[mem->cnt].order);
330 * Allocate a lot of memory, preferably max_sz bytes but at least min_sz. If
331 * memory is scarce, do not exceed 1/16th of the total lowmem pages. Also do
332 * not exceed the maximum number of segments, and try not to make segments
333 * much bigger than the maximum segment size.
335 static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
336 unsigned long max_sz,
337 unsigned int max_segs,
338 unsigned int max_seg_sz)
340 unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
341 unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
342 unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
343 unsigned long page_cnt = 0;
344 unsigned long limit = nr_free_buffer_pages() >> 4;
345 struct mmc_test_mem *mem;
347 if (max_page_cnt > limit)
348 max_page_cnt = limit;
349 if (min_page_cnt > max_page_cnt)
350 min_page_cnt = max_page_cnt;
352 if (max_seg_page_cnt > max_page_cnt)
353 max_seg_page_cnt = max_page_cnt;
355 if (max_segs > max_page_cnt)
356 max_segs = max_page_cnt;
358 mem = kzalloc(sizeof(*mem), GFP_KERNEL);
362 mem->arr = kcalloc(max_segs, sizeof(*mem->arr), GFP_KERNEL);
366 while (max_page_cnt) {
369 gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
372 order = get_order(max_seg_page_cnt << PAGE_SHIFT);
374 page = alloc_pages(flags, order);
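/*
 * get_order() sizes each allocation to cover a full maximum-size segment;
 * __GFP_NOWARN keeps expected failures of such large allocations out of the
 * kernel log.
 */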
380 if (page_cnt < min_page_cnt)
384 mem->arr[mem->cnt].page = page;
385 mem->arr[mem->cnt].order = order;
387 if (max_page_cnt <= (1UL << order))
389 max_page_cnt -= 1UL << order;
390 page_cnt += 1UL << order;
391 if (mem->cnt >= max_segs) {
392 if (page_cnt < min_page_cnt)
401 mmc_test_free_mem(mem);
406 * Map memory into a scatterlist. Optionally allow the same memory to be
407 * mapped more than once.
409 static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
410 struct scatterlist *sglist, int repeat,
411 unsigned int max_segs, unsigned int max_seg_sz,
412 unsigned int *sg_len, int min_sg_len)
414 struct scatterlist *sg = NULL;
416 unsigned long sz = size;
418 sg_init_table(sglist, max_segs);
419 if (min_sg_len > max_segs)
420 min_sg_len = max_segs;
424 for (i = 0; i < mem->cnt; i++) {
425 unsigned long len = PAGE_SIZE << mem->arr[i].order;
427 if (min_sg_len && (size / min_sg_len < len))
428 len = ALIGN(size / min_sg_len, 512);
431 if (len > max_seg_sz)
439 sg_set_page(sg, mem->arr[i].page, len, 0);
445 } while (sz && repeat);
457 * Map memory into a scatterlist so that no pages are contiguous. Allow the
458 * same memory to be mapped more than once.
460 static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
462 struct scatterlist *sglist,
463 unsigned int max_segs,
464 unsigned int max_seg_sz,
465 unsigned int *sg_len)
467 struct scatterlist *sg = NULL;
468 unsigned int i = mem->cnt, cnt;
470 void *base, *addr, *last_addr = NULL;
472 sg_init_table(sglist, max_segs);
476 base = page_address(mem->arr[--i].page);
477 cnt = 1 << mem->arr[i].order;
479 addr = base + PAGE_SIZE * --cnt;
480 if (last_addr && last_addr + PAGE_SIZE == addr)
484 if (len > max_seg_sz)
494 sg_set_page(sg, virt_to_page(addr), len, 0);
509 * Calculate transfer rate in bytes per second.
511 static unsigned int mmc_test_rate(uint64_t bytes, struct timespec64 *ts)
515 ns = timespec64_to_ns(ts);
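/*
 * Halving bytes and ns together preserves the bytes/ns ratio while bringing
 * ns into 32-bit range, so the 64-by-32-bit do_div() below can be used to
 * compute bytes per second.
 */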
518 while (ns > UINT_MAX) {
526 do_div(bytes, (uint32_t)ns);
532 * Save transfer results for future usage
534 static void mmc_test_save_transfer_result(struct mmc_test_card *test,
535 unsigned int count, unsigned int sectors, struct timespec64 ts,
536 unsigned int rate, unsigned int iops)
538 struct mmc_test_transfer_result *tr;
543 tr = kmalloc(sizeof(*tr), GFP_KERNEL);
548 tr->sectors = sectors;
553 list_add_tail(&tr->link, &test->gr->tr_lst);
557 * Print the transfer rate.
559 static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
560 struct timespec64 *ts1, struct timespec64 *ts2)
562 unsigned int rate, iops, sectors = bytes >> 9;
563 struct timespec64 ts;
565 ts = timespec64_sub(*ts2, *ts1);
567 rate = mmc_test_rate(bytes, &ts);
568 iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */
570 pr_info("%s: Transfer of %u sectors (%u%s KiB) took %llu.%09u "
571 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
572 mmc_hostname(test->card->host), sectors, sectors >> 1,
573 (sectors & 1 ? ".5" : ""), (u64)ts.tv_sec,
574 (u32)ts.tv_nsec, rate / 1000, rate / 1024,
575 iops / 100, iops % 100);
577 mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
581 * Print the average transfer rate.
583 static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
584 unsigned int count, struct timespec64 *ts1,
585 struct timespec64 *ts2)
587 unsigned int rate, iops, sectors = bytes >> 9;
588 uint64_t tot = bytes * count;
589 struct timespec64 ts;
591 ts = timespec64_sub(*ts2, *ts1);
593 rate = mmc_test_rate(tot, &ts);
594 iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
596 pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
597 "%llu.%09u seconds (%u kB/s, %u KiB/s, "
598 "%u.%02u IOPS, sg_len %d)\n",
599 mmc_hostname(test->card->host), count, sectors, count,
600 sectors >> 1, (sectors & 1 ? ".5" : ""),
601 (u64)ts.tv_sec, (u32)ts.tv_nsec,
602 rate / 1000, rate / 1024, iops / 100, iops % 100,
605 mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
609 * Return the card size in sectors.
611 static unsigned int mmc_test_capacity(struct mmc_card *card)
613 if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
614 return card->ext_csd.sectors;
616 return card->csd.capacity << (card->csd.read_blkbits - 9);
619 /*******************************************************************/
620 /* Test preparation and cleanup */
621 /*******************************************************************/
624 * Fill the first BUFFER_SIZE bytes of the card with known data
625 * so that bad reads/writes can be detected
627 static int __mmc_test_prepare(struct mmc_test_card *test, int write, int val)
631 ret = mmc_test_set_blksize(test, 512);
636 memset(test->buffer, val, 512);
638 for (i = 0; i < 512; i++)
642 for (i = 0; i < BUFFER_SIZE / 512; i++) {
643 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
651 static int mmc_test_prepare_write(struct mmc_test_card *test)
653 return __mmc_test_prepare(test, 1, 0xDF);
656 static int mmc_test_prepare_read(struct mmc_test_card *test)
658 return __mmc_test_prepare(test, 0, 0);
661 static int mmc_test_cleanup(struct mmc_test_card *test)
663 return __mmc_test_prepare(test, 1, 0);
666 /*******************************************************************/
667 /* Test execution helpers */
668 /*******************************************************************/
671 * Modifies the mmc_request to perform the "short transfer" tests
673 static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
674 struct mmc_request *mrq, int write)
676 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
679 if (mrq->data->blocks > 1) {
680 mrq->cmd->opcode = write ?
681 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
684 mrq->cmd->opcode = MMC_SEND_STATUS;
685 mrq->cmd->arg = test->card->rca << 16;
690 * Checks that a normal transfer didn't have any errors
692 static int mmc_test_check_result(struct mmc_test_card *test,
693 struct mmc_request *mrq)
697 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
702 if (mrq->sbc && mrq->sbc->error)
703 ret = mrq->sbc->error;
704 if (!ret && mrq->cmd->error)
705 ret = mrq->cmd->error;
706 if (!ret && mrq->data->error)
707 ret = mrq->data->error;
708 if (!ret && mrq->stop && mrq->stop->error)
709 ret = mrq->stop->error;
710 if (!ret && mrq->data->bytes_xfered !=
711 mrq->data->blocks * mrq->data->blksz)
715 ret = RESULT_UNSUP_HOST;
721 * Checks that a "short transfer" behaved as expected
723 static int mmc_test_check_broken_result(struct mmc_test_card *test,
724 struct mmc_request *mrq)
728 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
733 if (!ret && mrq->cmd->error)
734 ret = mrq->cmd->error;
735 if (!ret && mrq->data->error == 0)
737 if (!ret && mrq->data->error != -ETIMEDOUT)
738 ret = mrq->data->error;
739 if (!ret && mrq->stop && mrq->stop->error)
740 ret = mrq->stop->error;
741 if (mrq->data->blocks > 1) {
742 if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
745 if (!ret && mrq->data->bytes_xfered > 0)
750 ret = RESULT_UNSUP_HOST;
755 struct mmc_test_req {
756 struct mmc_request mrq;
757 struct mmc_command sbc;
758 struct mmc_command cmd;
759 struct mmc_command stop;
760 struct mmc_command status;
761 struct mmc_data data;
765 * Helpers for testing non-blocking transfers with certain parameters
767 static void mmc_test_req_reset(struct mmc_test_req *rq)
769 memset(rq, 0, sizeof(struct mmc_test_req));
771 rq->mrq.cmd = &rq->cmd;
772 rq->mrq.data = &rq->data;
773 rq->mrq.stop = &rq->stop;
776 static struct mmc_test_req *mmc_test_req_alloc(void)
778 struct mmc_test_req *rq = kmalloc(sizeof(*rq), GFP_KERNEL);
781 mmc_test_req_reset(rq);
786 static void mmc_test_wait_done(struct mmc_request *mrq)
788 complete(&mrq->completion);
791 static int mmc_test_start_areq(struct mmc_test_card *test,
792 struct mmc_request *mrq,
793 struct mmc_request *prev_mrq)
795 struct mmc_host *host = test->card->host;
799 init_completion(&mrq->completion);
800 mrq->done = mmc_test_wait_done;
801 mmc_pre_req(host, mrq);
805 wait_for_completion(&prev_mrq->completion);
806 err = mmc_test_wait_busy(test);
808 err = mmc_test_check_result(test, prev_mrq);
812 err = mmc_start_request(host, mrq);
814 mmc_retune_release(host);
818 mmc_post_req(host, prev_mrq, 0);
821 mmc_post_req(host, mrq, err);
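/*
 * Non-blocking transfers double-buffer two requests: while one is in flight,
 * the next is prepared with mmc_pre_req() and submitted as soon as the
 * previous one completes (mmc_test_start_areq() handles the hand-over).
 */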
826 static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
827 unsigned int dev_addr, int write,
830 struct mmc_test_req *rq1, *rq2;
831 struct mmc_request *mrq, *prev_mrq;
834 struct mmc_test_area *t = &test->area;
835 struct scatterlist *sg = t->sg;
836 struct scatterlist *sg_areq = t->sg_areq;
838 rq1 = mmc_test_req_alloc();
839 rq2 = mmc_test_req_alloc();
848 for (i = 0; i < count; i++) {
849 mmc_test_req_reset(container_of(mrq, struct mmc_test_req, mrq));
850 mmc_test_prepare_mrq(test, mrq, sg, t->sg_len, dev_addr,
851 t->blocks, 512, write);
852 ret = mmc_test_start_areq(test, mrq, prev_mrq);
857 prev_mrq = &rq2->mrq;
861 dev_addr += t->blocks;
864 ret = mmc_test_start_areq(test, NULL, prev_mrq);
872 * Tests a basic transfer with certain parameters
874 static int mmc_test_simple_transfer(struct mmc_test_card *test,
875 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
876 unsigned blocks, unsigned blksz, int write)
878 struct mmc_request mrq = {};
879 struct mmc_command cmd = {};
880 struct mmc_command stop = {};
881 struct mmc_data data = {};
887 mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
888 blocks, blksz, write);
890 mmc_wait_for_req(test->card->host, &mrq);
892 mmc_test_wait_busy(test);
894 return mmc_test_check_result(test, &mrq);
898 * Tests a transfer where the card will fail completely or partly
900 static int mmc_test_broken_transfer(struct mmc_test_card *test,
901 unsigned blocks, unsigned blksz, int write)
903 struct mmc_request mrq = {};
904 struct mmc_command cmd = {};
905 struct mmc_command stop = {};
906 struct mmc_data data = {};
908 struct scatterlist sg;
914 sg_init_one(&sg, test->buffer, blocks * blksz);
916 mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
917 mmc_test_prepare_broken_mrq(test, &mrq, write);
919 mmc_wait_for_req(test->card->host, &mrq);
921 mmc_test_wait_busy(test);
923 return mmc_test_check_broken_result(test, &mrq);
927 * Does a complete transfer test where data is also validated
929 * Note: mmc_test_prepare() must have been done before this call
931 static int mmc_test_transfer(struct mmc_test_card *test,
932 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
933 unsigned blocks, unsigned blksz, int write)
939 for (i = 0; i < blocks * blksz; i++)
940 test->scratch[i] = i;
942 memset(test->scratch, 0, BUFFER_SIZE);
944 local_irq_save(flags);
945 sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
946 local_irq_restore(flags);
948 ret = mmc_test_set_blksize(test, blksz);
952 ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
953 blocks, blksz, write);
960 ret = mmc_test_set_blksize(test, 512);
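/*
 * Read back whole 512-byte sectors covering the transfer; any bytes beyond
 * the written data are expected to still hold the 0xDF fill pattern from the
 * prepare step, which catches overruns.
 */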
964 sectors = (blocks * blksz + 511) / 512;
965 if ((sectors * 512) == (blocks * blksz))
968 if ((sectors * 512) > BUFFER_SIZE)
971 memset(test->buffer, 0, sectors * 512);
973 for (i = 0; i < sectors; i++) {
974 ret = mmc_test_buffer_transfer(test,
975 test->buffer + i * 512,
976 dev_addr + i, 512, 0);
981 for (i = 0; i < blocks * blksz; i++) {
982 if (test->buffer[i] != (u8)i)
986 for (; i < sectors * 512; i++) {
987 if (test->buffer[i] != 0xDF)
991 local_irq_save(flags);
992 sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
993 local_irq_restore(flags);
994 for (i = 0; i < blocks * blksz; i++) {
995 if (test->scratch[i] != (u8)i)
1003 /*******************************************************************/
1005 /*******************************************************************/
1007 struct mmc_test_case {
1010 int (*prepare)(struct mmc_test_card *);
1011 int (*run)(struct mmc_test_card *);
1012 int (*cleanup)(struct mmc_test_card *);
1015 static int mmc_test_basic_write(struct mmc_test_card *test)
1018 struct scatterlist sg;
1020 ret = mmc_test_set_blksize(test, 512);
1024 sg_init_one(&sg, test->buffer, 512);
1026 return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
1029 static int mmc_test_basic_read(struct mmc_test_card *test)
1032 struct scatterlist sg;
1034 ret = mmc_test_set_blksize(test, 512);
1038 sg_init_one(&sg, test->buffer, 512);
1040 return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
1043 static int mmc_test_verify_write(struct mmc_test_card *test)
1045 struct scatterlist sg;
1047 sg_init_one(&sg, test->buffer, 512);
1049 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1052 static int mmc_test_verify_read(struct mmc_test_card *test)
1054 struct scatterlist sg;
1056 sg_init_one(&sg, test->buffer, 512);
1058 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1061 static int mmc_test_multi_write(struct mmc_test_card *test)
1064 struct scatterlist sg;
1066 if (test->card->host->max_blk_count == 1)
1067 return RESULT_UNSUP_HOST;
1069 size = PAGE_SIZE * 2;
1070 size = min(size, test->card->host->max_req_size);
1071 size = min(size, test->card->host->max_seg_size);
1072 size = min(size, test->card->host->max_blk_count * 512);
1075 return RESULT_UNSUP_HOST;
1077 sg_init_one(&sg, test->buffer, size);
1079 return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
1082 static int mmc_test_multi_read(struct mmc_test_card *test)
1085 struct scatterlist sg;
1087 if (test->card->host->max_blk_count == 1)
1088 return RESULT_UNSUP_HOST;
1090 size = PAGE_SIZE * 2;
1091 size = min(size, test->card->host->max_req_size);
1092 size = min(size, test->card->host->max_seg_size);
1093 size = min(size, test->card->host->max_blk_count * 512);
1096 return RESULT_UNSUP_HOST;
1098 sg_init_one(&sg, test->buffer, size);
1100 return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
1103 static int mmc_test_pow2_write(struct mmc_test_card *test)
1106 struct scatterlist sg;
1108 if (!test->card->csd.write_partial)
1109 return RESULT_UNSUP_CARD;
1111 for (i = 1; i < 512; i <<= 1) {
1112 sg_init_one(&sg, test->buffer, i);
1113 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
1121 static int mmc_test_pow2_read(struct mmc_test_card *test)
1124 struct scatterlist sg;
1126 if (!test->card->csd.read_partial)
1127 return RESULT_UNSUP_CARD;
1129 for (i = 1; i < 512; i <<= 1) {
1130 sg_init_one(&sg, test->buffer, i);
1131 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
1139 static int mmc_test_weird_write(struct mmc_test_card *test)
1142 struct scatterlist sg;
1144 if (!test->card->csd.write_partial)
1145 return RESULT_UNSUP_CARD;
1147 for (i = 3; i < 512; i += 7) {
1148 sg_init_one(&sg, test->buffer, i);
1149 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
1157 static int mmc_test_weird_read(struct mmc_test_card *test)
1160 struct scatterlist sg;
1162 if (!test->card->csd.read_partial)
1163 return RESULT_UNSUP_CARD;
1165 for (i = 3; i < 512; i += 7) {
1166 sg_init_one(&sg, test->buffer, i);
1167 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
1175 static int mmc_test_align_write(struct mmc_test_card *test)
1178 struct scatterlist sg;
1180 for (i = 1; i < TEST_ALIGN_END; i++) {
1181 sg_init_one(&sg, test->buffer + i, 512);
1182 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1190 static int mmc_test_align_read(struct mmc_test_card *test)
1193 struct scatterlist sg;
1195 for (i = 1; i < TEST_ALIGN_END; i++) {
1196 sg_init_one(&sg, test->buffer + i, 512);
1197 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1205 static int mmc_test_align_multi_write(struct mmc_test_card *test)
1209 struct scatterlist sg;
1211 if (test->card->host->max_blk_count == 1)
1212 return RESULT_UNSUP_HOST;
1214 size = PAGE_SIZE * 2;
1215 size = min(size, test->card->host->max_req_size);
1216 size = min(size, test->card->host->max_seg_size);
1217 size = min(size, test->card->host->max_blk_count * 512);
1220 return RESULT_UNSUP_HOST;
1222 for (i = 1; i < TEST_ALIGN_END; i++) {
1223 sg_init_one(&sg, test->buffer + i, size);
1224 ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
1232 static int mmc_test_align_multi_read(struct mmc_test_card *test)
1236 struct scatterlist sg;
1238 if (test->card->host->max_blk_count == 1)
1239 return RESULT_UNSUP_HOST;
1241 size = PAGE_SIZE * 2;
1242 size = min(size, test->card->host->max_req_size);
1243 size = min(size, test->card->host->max_seg_size);
1244 size = min(size, test->card->host->max_blk_count * 512);
1247 return RESULT_UNSUP_HOST;
1249 for (i = 1; i < TEST_ALIGN_END; i++) {
1250 sg_init_one(&sg, test->buffer + i, size);
1251 ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
1259 static int mmc_test_xfersize_write(struct mmc_test_card *test)
1263 ret = mmc_test_set_blksize(test, 512);
1267 return mmc_test_broken_transfer(test, 1, 512, 1);
1270 static int mmc_test_xfersize_read(struct mmc_test_card *test)
1274 ret = mmc_test_set_blksize(test, 512);
1278 return mmc_test_broken_transfer(test, 1, 512, 0);
1281 static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
1285 if (test->card->host->max_blk_count == 1)
1286 return RESULT_UNSUP_HOST;
1288 ret = mmc_test_set_blksize(test, 512);
1292 return mmc_test_broken_transfer(test, 2, 512, 1);
1295 static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
1299 if (test->card->host->max_blk_count == 1)
1300 return RESULT_UNSUP_HOST;
1302 ret = mmc_test_set_blksize(test, 512);
1306 return mmc_test_broken_transfer(test, 2, 512, 0);
1309 #ifdef CONFIG_HIGHMEM
1311 static int mmc_test_write_high(struct mmc_test_card *test)
1313 struct scatterlist sg;
1315 sg_init_table(&sg, 1);
1316 sg_set_page(&sg, test->highmem, 512, 0);
1318 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1321 static int mmc_test_read_high(struct mmc_test_card *test)
1323 struct scatterlist sg;
1325 sg_init_table(&sg, 1);
1326 sg_set_page(&sg, test->highmem, 512, 0);
1328 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1331 static int mmc_test_multi_write_high(struct mmc_test_card *test)
1334 struct scatterlist sg;
1336 if (test->card->host->max_blk_count == 1)
1337 return RESULT_UNSUP_HOST;
1339 size = PAGE_SIZE * 2;
1340 size = min(size, test->card->host->max_req_size);
1341 size = min(size, test->card->host->max_seg_size);
1342 size = min(size, test->card->host->max_blk_count * 512);
1345 return RESULT_UNSUP_HOST;
1347 sg_init_table(&sg, 1);
1348 sg_set_page(&sg, test->highmem, size, 0);
1350 return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
1353 static int mmc_test_multi_read_high(struct mmc_test_card *test)
1356 struct scatterlist sg;
1358 if (test->card->host->max_blk_count == 1)
1359 return RESULT_UNSUP_HOST;
1361 size = PAGE_SIZE * 2;
1362 size = min(size, test->card->host->max_req_size);
1363 size = min(size, test->card->host->max_seg_size);
1364 size = min(size, test->card->host->max_blk_count * 512);
1367 return RESULT_UNSUP_HOST;
1369 sg_init_table(&sg, 1);
1370 sg_set_page(&sg, test->highmem, size, 0);
1372 return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
1377 static int mmc_test_no_highmem(struct mmc_test_card *test)
1379 pr_info("%s: Highmem not configured - test skipped\n",
1380 mmc_hostname(test->card->host));
1384 #endif /* CONFIG_HIGHMEM */
1387 * Map sz bytes so that they can be transferred.
1389 static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
1390 int max_scatter, int min_sg_len, bool nonblock)
1392 struct mmc_test_area *t = &test->area;
1394 unsigned int sg_len = 0;
1396 t->blocks = sz >> 9;
1399 err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
1400 t->max_segs, t->max_seg_sz,
1403 err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
1404 t->max_seg_sz, &t->sg_len, min_sg_len);
1407 if (err || !nonblock)
1411 err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg_areq,
1412 t->max_segs, t->max_seg_sz,
1415 err = mmc_test_map_sg(t->mem, sz, t->sg_areq, 1, t->max_segs,
1416 t->max_seg_sz, &sg_len, min_sg_len);
1418 if (!err && sg_len != t->sg_len)
1423 pr_info("%s: Failed to map sg list\n",
1424 mmc_hostname(test->card->host));
1429 * Transfer bytes mapped by mmc_test_area_map().
1431 static int mmc_test_area_transfer(struct mmc_test_card *test,
1432 unsigned int dev_addr, int write)
1434 struct mmc_test_area *t = &test->area;
1436 return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
1437 t->blocks, 512, write);
1441 * Map and transfer bytes for multiple transfers.
1443 static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
1444 unsigned int dev_addr, int write,
1445 int max_scatter, int timed, int count,
1446 bool nonblock, int min_sg_len)
1448 struct timespec64 ts1, ts2;
1453 * In the case of a maximally scattered transfer, the maximum transfer
1454 * size is further limited by using PAGE_SIZE segments.
1457 struct mmc_test_area *t = &test->area;
1458 unsigned long max_tfr;
1460 if (t->max_seg_sz >= PAGE_SIZE)
1461 max_tfr = t->max_segs * PAGE_SIZE;
1463 max_tfr = t->max_segs * t->max_seg_sz;
1468 ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len, nonblock);
1473 ktime_get_ts64(&ts1);
1475 ret = mmc_test_nonblock_transfer(test, dev_addr, write, count);
1477 for (i = 0; i < count && ret == 0; i++) {
1478 ret = mmc_test_area_transfer(test, dev_addr, write);
1479 dev_addr += sz >> 9;
1486 ktime_get_ts64(&ts2);
1489 mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);
1494 static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
1495 unsigned int dev_addr, int write, int max_scatter,
1498 return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
1499 timed, 1, false, 0);
1503 * Write the test area entirely.
1505 static int mmc_test_area_fill(struct mmc_test_card *test)
1507 struct mmc_test_area *t = &test->area;
1509 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
1513 * Erase the test area entirely.
1515 static int mmc_test_area_erase(struct mmc_test_card *test)
1517 struct mmc_test_area *t = &test->area;
1519 if (!mmc_can_erase(test->card))
1522 return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
1527 * Cleanup struct mmc_test_area.
1529 static int mmc_test_area_cleanup(struct mmc_test_card *test)
1531 struct mmc_test_area *t = &test->area;
1535 mmc_test_free_mem(t->mem);
1541 * Initialize an area for testing large transfers. The test area is set to the
1542 * middle of the card because cards may have different characteristics at the
1543 * front (for FAT file system optimization). Optionally, the area is erased
1544 * (if the card supports it) which may improve write performance. Optionally,
1545 * the area is filled with data for subsequent read tests.
1547 static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
1549 struct mmc_test_area *t = &test->area;
1550 unsigned long min_sz = 64 * 1024, sz;
1553 ret = mmc_test_set_blksize(test, 512);
1557 /* Make the test area size about 4MiB (pref_erase is in 512-byte sectors) */
1558 sz = (unsigned long)test->card->pref_erase << 9;
1560 while (t->max_sz < 4 * 1024 * 1024)
1562 while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
1565 t->max_segs = test->card->host->max_segs;
1566 t->max_seg_sz = test->card->host->max_seg_size;
1567 t->max_seg_sz -= t->max_seg_sz % 512;
1569 t->max_tfr = t->max_sz;
1570 if (t->max_tfr >> 9 > test->card->host->max_blk_count)
1571 t->max_tfr = test->card->host->max_blk_count << 9;
1572 if (t->max_tfr > test->card->host->max_req_size)
1573 t->max_tfr = test->card->host->max_req_size;
1574 if (t->max_tfr / t->max_seg_sz > t->max_segs)
1575 t->max_tfr = t->max_segs * t->max_seg_sz;
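/*
 * max_tfr is now the largest single request size: capped by the test area
 * size, the host's max_blk_count and max_req_size, and by
 * max_segs * max_seg_sz.
 */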
1578 * Try to allocate enough memory for a max. sized transfer. Less is OK
1579 * because the same memory can be mapped into the scatterlist more than
1580 * once. Also, take into account the limits imposed on scatterlist
1581 * segments by the host driver.
1583 t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
1588 t->sg = kmalloc_array(t->max_segs, sizeof(*t->sg), GFP_KERNEL);
1594 t->sg_areq = kmalloc_array(t->max_segs, sizeof(*t->sg_areq),
1601 t->dev_addr = mmc_test_capacity(test->card) / 2;
1602 t->dev_addr -= t->dev_addr % (t->max_sz >> 9);
1605 ret = mmc_test_area_erase(test);
1611 ret = mmc_test_area_fill(test);
1619 mmc_test_area_cleanup(test);
1624 * Prepare for large transfers. Do not erase the test area.
1626 static int mmc_test_area_prepare(struct mmc_test_card *test)
1628 return mmc_test_area_init(test, 0, 0);
1632 * Prepare for large transfers. Do erase the test area.
1634 static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
1636 return mmc_test_area_init(test, 1, 0);
1640 * Prepare for large transfers. Erase and fill the test area.
1642 static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
1644 return mmc_test_area_init(test, 1, 1);
1648 * Test best-case performance. Best-case performance is expected from
1649 * a single large transfer.
1651 * An additional option (max_scatter) allows the measurement of the same
1652 * transfer but with no contiguous pages in the scatter list. This tests
1653 * the efficiency of DMA to handle scattered pages.
1655 static int mmc_test_best_performance(struct mmc_test_card *test, int write,
1658 struct mmc_test_area *t = &test->area;
1660 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
1665 * Best-case read performance.
1667 static int mmc_test_best_read_performance(struct mmc_test_card *test)
1669 return mmc_test_best_performance(test, 0, 0);
1673 * Best-case write performance.
1675 static int mmc_test_best_write_performance(struct mmc_test_card *test)
1677 return mmc_test_best_performance(test, 1, 0);
1681 * Best-case read performance into scattered pages.
1683 static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
1685 return mmc_test_best_performance(test, 0, 1);
1689 * Best-case write performance from scattered pages.
1691 static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
1693 return mmc_test_best_performance(test, 1, 1);
1697 * Single read performance by transfer size.
1699 static int mmc_test_profile_read_perf(struct mmc_test_card *test)
1701 struct mmc_test_area *t = &test->area;
1703 unsigned int dev_addr;
1706 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1707 dev_addr = t->dev_addr + (sz >> 9);
1708 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1713 dev_addr = t->dev_addr;
1714 return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1718 * Single write performance by transfer size.
1720 static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1722 struct mmc_test_area *t = &test->area;
1724 unsigned int dev_addr;
1727 ret = mmc_test_area_erase(test);
1730 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1731 dev_addr = t->dev_addr + (sz >> 9);
1732 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1736 ret = mmc_test_area_erase(test);
1740 dev_addr = t->dev_addr;
1741 return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1745 * Single trim performance by transfer size.
1747 static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
1749 struct mmc_test_area *t = &test->area;
1751 unsigned int dev_addr;
1752 struct timespec64 ts1, ts2;
1755 if (!mmc_can_trim(test->card))
1756 return RESULT_UNSUP_CARD;
1758 if (!mmc_can_erase(test->card))
1759 return RESULT_UNSUP_HOST;
1761 for (sz = 512; sz < t->max_sz; sz <<= 1) {
1762 dev_addr = t->dev_addr + (sz >> 9);
1763 ktime_get_ts64(&ts1);
1764 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1767 ktime_get_ts64(&ts2);
1768 mmc_test_print_rate(test, sz, &ts1, &ts2);
1770 dev_addr = t->dev_addr;
1771 ktime_get_ts64(&ts1);
1772 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1775 ktime_get_ts64(&ts2);
1776 mmc_test_print_rate(test, sz, &ts1, &ts2);
1780 static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
1782 struct mmc_test_area *t = &test->area;
1783 unsigned int dev_addr, i, cnt;
1784 struct timespec64 ts1, ts2;
1787 cnt = t->max_sz / sz;
1788 dev_addr = t->dev_addr;
1789 ktime_get_ts64(&ts1);
1790 for (i = 0; i < cnt; i++) {
1791 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
1794 dev_addr += (sz >> 9);
1796 ktime_get_ts64(&ts2);
1797 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1802 * Consecutive read performance by transfer size.
1804 static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
1806 struct mmc_test_area *t = &test->area;
1810 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1811 ret = mmc_test_seq_read_perf(test, sz);
1816 return mmc_test_seq_read_perf(test, sz);
1819 static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
1821 struct mmc_test_area *t = &test->area;
1822 unsigned int dev_addr, i, cnt;
1823 struct timespec64 ts1, ts2;
1826 ret = mmc_test_area_erase(test);
1829 cnt = t->max_sz / sz;
1830 dev_addr = t->dev_addr;
1831 ktime_get_ts64(&ts1);
1832 for (i = 0; i < cnt; i++) {
1833 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
1836 dev_addr += (sz >> 9);
1838 ktime_get_ts64(&ts2);
1839 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1844 * Consecutive write performance by transfer size.
1846 static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
1848 struct mmc_test_area *t = &test->area;
1852 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1853 ret = mmc_test_seq_write_perf(test, sz);
1858 return mmc_test_seq_write_perf(test, sz);
1862 * Consecutive trim performance by transfer size.
1864 static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
1866 struct mmc_test_area *t = &test->area;
1868 unsigned int dev_addr, i, cnt;
1869 struct timespec64 ts1, ts2;
1872 if (!mmc_can_trim(test->card))
1873 return RESULT_UNSUP_CARD;
1875 if (!mmc_can_erase(test->card))
1876 return RESULT_UNSUP_HOST;
1878 for (sz = 512; sz <= t->max_sz; sz <<= 1) {
1879 ret = mmc_test_area_erase(test);
1882 ret = mmc_test_area_fill(test);
1885 cnt = t->max_sz / sz;
1886 dev_addr = t->dev_addr;
1887 ktime_get_ts64(&ts1);
1888 for (i = 0; i < cnt; i++) {
1889 ret = mmc_erase(test->card, dev_addr, sz >> 9,
1893 dev_addr += (sz >> 9);
1895 ktime_get_ts64(&ts2);
1896 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
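/*
 * Simple linear congruential generator (the classic rand() constants
 * 1103515245/12345), so the "random" I/O pattern is deterministic and
 * repeatable between runs; mmc_test_rnd_num() returns a value in
 * [0, rnd_cnt).
 */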
1901 static unsigned int rnd_next = 1;
1903 static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
1907 rnd_next = rnd_next * 1103515245 + 12345;
1908 r = (rnd_next >> 16) & 0x7fff;
1909 return (r * rnd_cnt) >> 15;
1912 static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
1915 unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
1917 struct timespec64 ts1, ts2, ts;
1922 rnd_addr = mmc_test_capacity(test->card) / 4;
1923 range1 = rnd_addr / test->card->pref_erase;
1924 range2 = range1 / ssz;
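/*
 * Issue transfers of size sz at pseudo-random addresses for roughly ten
 * seconds, then, when @print is set, report the average rate over however
 * many completed.
 */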
1926 ktime_get_ts64(&ts1);
1927 for (cnt = 0; cnt < UINT_MAX; cnt++) {
1928 ktime_get_ts64(&ts2);
1929 ts = timespec64_sub(ts2, ts1);
1930 if (ts.tv_sec >= 10)
1932 ea = mmc_test_rnd_num(range1);
1936 dev_addr = rnd_addr + test->card->pref_erase * ea +
1937 ssz * mmc_test_rnd_num(range2);
1938 ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
1943 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1947 static int mmc_test_random_perf(struct mmc_test_card *test, int write)
1949 struct mmc_test_area *t = &test->area;
1954 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1956 * When writing, try to get more consistent results by running
1957 * the test twice with exactly the same I/O but outputting the
1958 * results only for the 2nd run.
1962 ret = mmc_test_rnd_perf(test, write, 0, sz);
1967 ret = mmc_test_rnd_perf(test, write, 1, sz);
1974 ret = mmc_test_rnd_perf(test, write, 0, sz);
1979 return mmc_test_rnd_perf(test, write, 1, sz);
1983 * Random read performance by transfer size.
1985 static int mmc_test_random_read_perf(struct mmc_test_card *test)
1987 return mmc_test_random_perf(test, 0);
1991 * Random write performance by transfer size.
1993 static int mmc_test_random_write_perf(struct mmc_test_card *test)
1995 return mmc_test_random_perf(test, 1);
1998 static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
1999 unsigned int tot_sz, int max_scatter)
2001 struct mmc_test_area *t = &test->area;
2002 unsigned int dev_addr, i, cnt, sz, ssz;
2003 struct timespec64 ts1, ts2;
2009 * In the case of a maximally scattered transfer, the maximum transfer
2010 * size is further limited by using PAGE_SIZE segments.
2013 unsigned long max_tfr;
2015 if (t->max_seg_sz >= PAGE_SIZE)
2016 max_tfr = t->max_segs * PAGE_SIZE;
2018 max_tfr = t->max_segs * t->max_seg_sz;
2024 dev_addr = mmc_test_capacity(test->card) / 4;
2025 if (tot_sz > dev_addr << 9)
2026 tot_sz = dev_addr << 9;
2028 dev_addr &= 0xffff0000; /* Round to a 32MiB (64Ki-sector) boundary */
2030 ktime_get_ts64(&ts1);
2031 for (i = 0; i < cnt; i++) {
2032 ret = mmc_test_area_io(test, sz, dev_addr, write,
2038 ktime_get_ts64(&ts2);
2040 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
2045 static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
2049 for (i = 0; i < 10; i++) {
2050 ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
2054 for (i = 0; i < 5; i++) {
2055 ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
2059 for (i = 0; i < 3; i++) {
2060 ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
2069 * Large sequential read performance.
2071 static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
2073 return mmc_test_large_seq_perf(test, 0);
2077 * Large sequential write performance.
2079 static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
2081 return mmc_test_large_seq_perf(test, 1);
2084 static int mmc_test_rw_multiple(struct mmc_test_card *test,
2085 struct mmc_test_multiple_rw *tdata,
2086 unsigned int reqsize, unsigned int size,
2089 unsigned int dev_addr;
2090 struct mmc_test_area *t = &test->area;
2093 /* Set up test area */
2094 if (size > mmc_test_capacity(test->card) / 2 * 512)
2095 size = mmc_test_capacity(test->card) / 2 * 512;
2096 if (reqsize > t->max_tfr)
2097 reqsize = t->max_tfr;
2098 dev_addr = mmc_test_capacity(test->card) / 4;
2099 if ((dev_addr & 0xffff0000))
2100 dev_addr &= 0xffff0000; /* Round to a 32MiB (64Ki-sector) boundary */
2102 dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
2109 /* prepare test area */
2110 if (mmc_can_erase(test->card) &&
2111 tdata->prepare & MMC_TEST_PREP_ERASE) {
2112 ret = mmc_erase(test->card, dev_addr,
2113 size / 512, test->card->erase_arg);
2115 ret = mmc_erase(test->card, dev_addr,
2116 size / 512, MMC_ERASE_ARG);
2122 ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
2123 tdata->do_write, 0, 1, size / reqsize,
2124 tdata->do_nonblock_req, min_sg_len);
2130 pr_info("[%s] error\n", __func__);
2134 static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
2135 struct mmc_test_multiple_rw *rw)
2139 void *pre_req = test->card->host->ops->pre_req;
2140 void *post_req = test->card->host->ops->post_req;
2142 if (rw->do_nonblock_req &&
2143 ((!pre_req && post_req) || (pre_req && !post_req))) {
2144 pr_info("error: only one of pre/post is defined\n");
2148 for (i = 0 ; i < rw->len && ret == 0; i++) {
2149 ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
2156 static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
2157 struct mmc_test_multiple_rw *rw)
2162 for (i = 0 ; i < rw->len && ret == 0; i++) {
2163 ret = mmc_test_rw_multiple(test, rw, 512 * 1024, rw->size,
2172 * Multiple blocking write 4k to 4 MB chunks
2174 static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
2176 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2177 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2178 struct mmc_test_multiple_rw test_data = {
2180 .size = TEST_AREA_MAX_SIZE,
2181 .len = ARRAY_SIZE(bs),
2183 .do_nonblock_req = false,
2184 .prepare = MMC_TEST_PREP_ERASE,
2187 return mmc_test_rw_multiple_size(test, &test_data);
2191 * Multiple non-blocking write 4k to 4 MB chunks
2193 static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
2195 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2196 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2197 struct mmc_test_multiple_rw test_data = {
2199 .size = TEST_AREA_MAX_SIZE,
2200 .len = ARRAY_SIZE(bs),
2202 .do_nonblock_req = true,
2203 .prepare = MMC_TEST_PREP_ERASE,
2206 return mmc_test_rw_multiple_size(test, &test_data);
2210 * Multiple blocking read 4k to 4 MB chunks
2212 static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
2214 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2215 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2216 struct mmc_test_multiple_rw test_data = {
2218 .size = TEST_AREA_MAX_SIZE,
2219 .len = ARRAY_SIZE(bs),
2221 .do_nonblock_req = false,
2222 .prepare = MMC_TEST_PREP_NONE,
2225 return mmc_test_rw_multiple_size(test, &test_data);
2229 * Multiple non-blocking read 4k to 4 MB chunks
2231 static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
2233 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2234 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2235 struct mmc_test_multiple_rw test_data = {
2237 .size = TEST_AREA_MAX_SIZE,
2238 .len = ARRAY_SIZE(bs),
2240 .do_nonblock_req = true,
2241 .prepare = MMC_TEST_PREP_NONE,
2244 return mmc_test_rw_multiple_size(test, &test_data);
2248 * Multiple blocking write 1 to 512 sg elements
2250 static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
2252 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2253 1 << 7, 1 << 8, 1 << 9};
2254 struct mmc_test_multiple_rw test_data = {
2256 .size = TEST_AREA_MAX_SIZE,
2257 .len = ARRAY_SIZE(sg_len),
2259 .do_nonblock_req = false,
2260 .prepare = MMC_TEST_PREP_ERASE,
2263 return mmc_test_rw_multiple_sg_len(test, &test_data);
2267 * Multiple non-blocking write 1 to 512 sg elements
2269 static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
2271 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2272 1 << 7, 1 << 8, 1 << 9};
2273 struct mmc_test_multiple_rw test_data = {
2275 .size = TEST_AREA_MAX_SIZE,
2276 .len = ARRAY_SIZE(sg_len),
2278 .do_nonblock_req = true,
2279 .prepare = MMC_TEST_PREP_ERASE,
2282 return mmc_test_rw_multiple_sg_len(test, &test_data);
2286 * Multiple blocking read 1 to 512 sg elements
2288 static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
2290 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2291 1 << 7, 1 << 8, 1 << 9};
2292 struct mmc_test_multiple_rw test_data = {
2294 .size = TEST_AREA_MAX_SIZE,
2295 .len = ARRAY_SIZE(sg_len),
2297 .do_nonblock_req = false,
2298 .prepare = MMC_TEST_PREP_NONE,
2301 return mmc_test_rw_multiple_sg_len(test, &test_data);
2305 * Multiple non-blocking read 1 to 512 sg elements
2307 static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
2309 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2310 1 << 7, 1 << 8, 1 << 9};
2311 struct mmc_test_multiple_rw test_data = {
2313 .size = TEST_AREA_MAX_SIZE,
2314 .len = ARRAY_SIZE(sg_len),
2316 .do_nonblock_req = true,
2317 .prepare = MMC_TEST_PREP_NONE,
2320 return mmc_test_rw_multiple_sg_len(test, &test_data);
2324 * eMMC hardware reset.
2326 static int mmc_test_reset(struct mmc_test_card *test)
2328 struct mmc_card *card = test->card;
2329 struct mmc_host *host = card->host;
2332 err = mmc_hw_reset(host);
2335 * Reset will re-enable the card's command queue, but tests
2336 * expect it to be disabled.
2338 if (card->ext_csd.cmdq_en)
2339 mmc_cmdq_disable(card);
2341 } else if (err == -EOPNOTSUPP) {
2342 return RESULT_UNSUP_HOST;
2348 static int mmc_test_send_status(struct mmc_test_card *test,
2349 struct mmc_command *cmd)
2351 memset(cmd, 0, sizeof(*cmd));
2353 cmd->opcode = MMC_SEND_STATUS;
2354 if (!mmc_host_is_spi(test->card->host))
2355 cmd->arg = test->card->rca << 16;
2356 cmd->flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
2358 return mmc_wait_for_cmd(test->card->host, cmd, 0);
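/*
 * Start a data transfer with cap_cmd_during_tfr set and poll the card with
 * CMD13 (SEND_STATUS), once or repeatedly, while the data phase is ongoing,
 * verifying that commands can be issued during an active transfer.
 */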
2361 static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
2362 unsigned int dev_addr, int use_sbc,
2363 int repeat_cmd, int write, int use_areq)
2365 struct mmc_test_req *rq = mmc_test_req_alloc();
2366 struct mmc_host *host = test->card->host;
2367 struct mmc_test_area *t = &test->area;
2368 struct mmc_request *mrq;
2369 unsigned long timeout;
2370 bool expired = false;
2371 int ret = 0, cmd_ret;
2380 mrq->sbc = &rq->sbc;
2381 mrq->cap_cmd_during_tfr = true;
2383 mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks,
2386 if (use_sbc && t->blocks > 1 && !mrq->sbc) {
2387 ret = mmc_host_cmd23(host) ?
2393 /* Start ongoing data request */
2395 ret = mmc_test_start_areq(test, mrq, NULL);
2399 mmc_wait_for_req(host, mrq);
2402 timeout = jiffies + msecs_to_jiffies(3000);
2406 /* Send status command while data transfer in progress */
2407 cmd_ret = mmc_test_send_status(test, &rq->status);
2411 status = rq->status.resp[0];
2412 if (status & R1_ERROR) {
2417 if (mmc_is_req_done(host, mrq))
2420 expired = time_after(jiffies, timeout);
2422 pr_info("%s: timeout waiting for Tran state status %#x\n",
2423 mmc_hostname(host), status);
2424 cmd_ret = -ETIMEDOUT;
2427 } while (repeat_cmd && R1_CURRENT_STATE(status) != R1_STATE_TRAN);
2429 /* Wait for data request to complete */
2431 ret = mmc_test_start_areq(test, NULL, mrq);
2433 mmc_wait_for_req_done(test->card->host, mrq);
2437 * For a cap_cmd_during_tfr request, the upper layer must send the stop command itself if one is required.
2440 if (mrq->data->stop && (mrq->data->error || !mrq->sbc)) {
2442 mmc_wait_for_cmd(host, mrq->data->stop, 0);
2444 ret = mmc_wait_for_cmd(host, mrq->data->stop, 0);
2451 pr_info("%s: Send Status failed: status %#x, error %d\n",
2452 mmc_hostname(test->card->host), status, cmd_ret);
2455 ret = mmc_test_check_result(test, mrq);
2459 ret = mmc_test_wait_busy(test);
2463 if (repeat_cmd && (t->blocks + 1) << 9 > t->max_tfr)
2464 pr_info("%s: %d commands completed during transfer of %u blocks\n",
2465 mmc_hostname(test->card->host), count, t->blocks);
2475 static int __mmc_test_cmds_during_tfr(struct mmc_test_card *test,
2476 unsigned long sz, int use_sbc, int write,
2479 struct mmc_test_area *t = &test->area;
2482 if (!(test->card->host->caps & MMC_CAP_CMD_DURING_TFR))
2483 return RESULT_UNSUP_HOST;
2485 ret = mmc_test_area_map(test, sz, 0, 0, use_areq);
2489 ret = mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 0, write,
2494 return mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 1, write,
2498 static int mmc_test_cmds_during_tfr(struct mmc_test_card *test, int use_sbc,
2499 int write, int use_areq)
2501 struct mmc_test_area *t = &test->area;
2505 for (sz = 512; sz <= t->max_tfr; sz += 512) {
2506 ret = __mmc_test_cmds_during_tfr(test, sz, use_sbc, write,
2515 * Commands during read - no Set Block Count (CMD23).
2517 static int mmc_test_cmds_during_read(struct mmc_test_card *test)
2519 return mmc_test_cmds_during_tfr(test, 0, 0, 0);
2523 * Commands during write - no Set Block Count (CMD23).
2525 static int mmc_test_cmds_during_write(struct mmc_test_card *test)
2527 return mmc_test_cmds_during_tfr(test, 0, 1, 0);
2531 * Commands during read - use Set Block Count (CMD23).
2533 static int mmc_test_cmds_during_read_cmd23(struct mmc_test_card *test)
2535 return mmc_test_cmds_during_tfr(test, 1, 0, 0);
2539 * Commands during write - use Set Block Count (CMD23).
2541 static int mmc_test_cmds_during_write_cmd23(struct mmc_test_card *test)
2543 return mmc_test_cmds_during_tfr(test, 1, 1, 0);
2547 * Commands during non-blocking read - use Set Block Count (CMD23).
2549 static int mmc_test_cmds_during_read_cmd23_nonblock(struct mmc_test_card *test)
2551 return mmc_test_cmds_during_tfr(test, 1, 0, 1);
2555 * Commands during non-blocking write - use Set Block Count (CMD23).
2557 static int mmc_test_cmds_during_write_cmd23_nonblock(struct mmc_test_card *test)
2559 return mmc_test_cmds_during_tfr(test, 1, 1, 1);
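/*
 * Table of test cases. Tests are normally run from user space through the
 * card's debugfs "test" file (e.g. /sys/kernel/debug/mmc0/mmc0:0001/test):
 * writing a test number runs that test, and the "testlist" file enumerates
 * the available tests. The path shown is an example and varies per host and
 * card.
 */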
2562 static const struct mmc_test_case mmc_test_cases[] = {
2564 .name = "Basic write (no data verification)",
2565 .run = mmc_test_basic_write,
2569 .name = "Basic read (no data verification)",
2570 .run = mmc_test_basic_read,
2574 .name = "Basic write (with data verification)",
2575 .prepare = mmc_test_prepare_write,
2576 .run = mmc_test_verify_write,
2577 .cleanup = mmc_test_cleanup,
2581 .name = "Basic read (with data verification)",
2582 .prepare = mmc_test_prepare_read,
2583 .run = mmc_test_verify_read,
2584 .cleanup = mmc_test_cleanup,
2588 .name = "Multi-block write",
2589 .prepare = mmc_test_prepare_write,
2590 .run = mmc_test_multi_write,
2591 .cleanup = mmc_test_cleanup,
2595 .name = "Multi-block read",
2596 .prepare = mmc_test_prepare_read,
2597 .run = mmc_test_multi_read,
2598 .cleanup = mmc_test_cleanup,
2602 .name = "Power of two block writes",
2603 .prepare = mmc_test_prepare_write,
2604 .run = mmc_test_pow2_write,
2605 .cleanup = mmc_test_cleanup,
2609 .name = "Power of two block reads",
2610 .prepare = mmc_test_prepare_read,
2611 .run = mmc_test_pow2_read,
2612 .cleanup = mmc_test_cleanup,
2616 .name = "Weird sized block writes",
2617 .prepare = mmc_test_prepare_write,
2618 .run = mmc_test_weird_write,
2619 .cleanup = mmc_test_cleanup,
2623 .name = "Weird sized block reads",
2624 .prepare = mmc_test_prepare_read,
2625 .run = mmc_test_weird_read,
2626 .cleanup = mmc_test_cleanup,
2630 .name = "Badly aligned write",
2631 .prepare = mmc_test_prepare_write,
2632 .run = mmc_test_align_write,
2633 .cleanup = mmc_test_cleanup,
2637 .name = "Badly aligned read",
2638 .prepare = mmc_test_prepare_read,
2639 .run = mmc_test_align_read,
2640 .cleanup = mmc_test_cleanup,
2644 .name = "Badly aligned multi-block write",
2645 .prepare = mmc_test_prepare_write,
2646 .run = mmc_test_align_multi_write,
2647 .cleanup = mmc_test_cleanup,
2651 .name = "Badly aligned multi-block read",
2652 .prepare = mmc_test_prepare_read,
2653 .run = mmc_test_align_multi_read,
2654 .cleanup = mmc_test_cleanup,
2658 .name = "Proper xfer_size at write (start failure)",
2659 .run = mmc_test_xfersize_write,
2663 .name = "Proper xfer_size at read (start failure)",
2664 .run = mmc_test_xfersize_read,
2668 .name = "Proper xfer_size at write (midway failure)",
2669 .run = mmc_test_multi_xfersize_write,
2673 .name = "Proper xfer_size at read (midway failure)",
2674 .run = mmc_test_multi_xfersize_read,
2677 #ifdef CONFIG_HIGHMEM
2680 .name = "Highmem write",
2681 .prepare = mmc_test_prepare_write,
2682 .run = mmc_test_write_high,
2683 .cleanup = mmc_test_cleanup,
2687 .name = "Highmem read",
2688 .prepare = mmc_test_prepare_read,
2689 .run = mmc_test_read_high,
2690 .cleanup = mmc_test_cleanup,
2694 .name = "Multi-block highmem write",
2695 .prepare = mmc_test_prepare_write,
2696 .run = mmc_test_multi_write_high,
2697 .cleanup = mmc_test_cleanup,
2701 .name = "Multi-block highmem read",
2702 .prepare = mmc_test_prepare_read,
2703 .run = mmc_test_multi_read_high,
2704 .cleanup = mmc_test_cleanup,
2710 .name = "Highmem write",
2711 .run = mmc_test_no_highmem,
2715 .name = "Highmem read",
2716 .run = mmc_test_no_highmem,
2720 .name = "Multi-block highmem write",
2721 .run = mmc_test_no_highmem,
2725 .name = "Multi-block highmem read",
2726 .run = mmc_test_no_highmem,
2729 #endif /* CONFIG_HIGHMEM */

	{
		.name = "Best-case read performance",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case read performance into scattered pages",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance from scattered pages",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single trim performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive trim performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Random read performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_random_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Random write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_random_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Large sequential read into scattered pages",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_large_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Large sequential write from scattered pages",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_large_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance with blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_write_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance with non-blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_write_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance with blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_read_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance with non-blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_read_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_wr_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance non-blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_wr_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_r_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance non-blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_r_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Reset test",
		.run = mmc_test_reset,
	},

	{
		.name = "Commands during read - no Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_read,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Commands during write - no Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_write,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Commands during read - use Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_read_cmd23,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Commands during write - use Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_write_cmd23,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Commands during non-blocking read - use Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_read_cmd23_nonblock,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Commands during non-blocking write - use Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_write_cmd23_nonblock,
		.cleanup = mmc_test_area_cleanup,
	},
};

static DEFINE_MUTEX(mmc_test_lock);

static LIST_HEAD(mmc_test_result);

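/*
 * Run the selected test case(s) against the card. A @testcase of 0 runs
 * every entry in mmc_test_cases[]; a non-zero value selects the single
 * test whose 1-based number matches what "testlist" prints.
 */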
static void mmc_test_run(struct mmc_test_card *test, int testcase)
{
	int i, ret;

	pr_info("%s: Starting tests of card %s...\n",
		mmc_hostname(test->card->host), mmc_card_id(test->card));

	mmc_claim_host(test->card->host);

	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) {
		struct mmc_test_general_result *gr;

		if (testcase && ((i + 1) != testcase))
			continue;

		pr_info("%s: Test case %d. %s...\n",
			mmc_hostname(test->card->host), i + 1,
			mmc_test_cases[i].name);

		if (mmc_test_cases[i].prepare) {
			ret = mmc_test_cases[i].prepare(test);
			if (ret) {
				pr_info("%s: Result: Prepare stage failed! (%d)\n",
					mmc_hostname(test->card->host), ret);
				continue;
			}
		}

		gr = kzalloc(sizeof(*gr), GFP_KERNEL);
		if (gr) {
			INIT_LIST_HEAD(&gr->tr_lst);

			/* Fill in the data we already know */
			gr->card = test->card;
			gr->testcase = i;

			/* Append container to the global result list */
			list_add_tail(&gr->link, &mmc_test_result);

			/* Save the pointer to the created container in our private structure */
			test->gr = gr;
		}

		ret = mmc_test_cases[i].run(test);
		switch (ret) {
		case RESULT_OK:
			pr_info("%s: Result: OK\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_FAIL:
			pr_info("%s: Result: FAILED\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_HOST:
			pr_info("%s: Result: UNSUPPORTED (by host)\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_CARD:
			pr_info("%s: Result: UNSUPPORTED (by card)\n",
				mmc_hostname(test->card->host));
			break;
		default:
			pr_info("%s: Result: ERROR (%d)\n",
				mmc_hostname(test->card->host), ret);
		}

		/* Save the result */
		if (gr)
			gr->result = ret;

		if (mmc_test_cases[i].cleanup) {
			ret = mmc_test_cases[i].cleanup(test);
			if (ret)
				pr_info("%s: Warning: Cleanup stage failed! (%d)\n",
					mmc_hostname(test->card->host), ret);
		}
	}

	mmc_release_host(test->card->host);

	pr_info("%s: Tests completed.\n",
		mmc_hostname(test->card->host));
}

static void mmc_test_free_result(struct mmc_card *card)
{
	struct mmc_test_general_result *gr, *grs;

	mutex_lock(&mmc_test_lock);
	list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
		struct mmc_test_transfer_result *tr, *trs;

		if (card && gr->card != card)
			continue;

		list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
			list_del(&tr->link);
			kfree(tr);
		}
		list_del(&gr->link);
		kfree(gr);
	}
	mutex_unlock(&mmc_test_lock);
}

static LIST_HEAD(mmc_test_file_test);

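/*
 * Reading the "test" debugfs file prints one "Test <n>: <result>" line per
 * recorded test case, followed by any transfer measurements as
 * "<count> <sectors> <seconds>.<nanoseconds> <rate> <iops>".
 */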
static int mtf_test_show(struct seq_file *sf, void *data)
{
	struct mmc_card *card = (struct mmc_card *)sf->private;
	struct mmc_test_general_result *gr;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry(gr, &mmc_test_result, link) {
		struct mmc_test_transfer_result *tr;

		if (gr->card != card)
			continue;

		seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);

		list_for_each_entry(tr, &gr->tr_lst, link) {
			seq_printf(sf, "%u %d %llu.%09u %u %u.%02u\n",
				tr->count, tr->sectors,
				(u64)tr->ts.tv_sec, (u32)tr->ts.tv_nsec,
				tr->rate, tr->iops / 100, tr->iops % 100);
		}
	}

	mutex_unlock(&mmc_test_lock);

	return 0;
}

static int mtf_test_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtf_test_show, inode->i_private);
}

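/*
 * Example usage from userspace (debugfs paths depend on the host and card;
 * the path below is only illustrative):
 *
 *   # cat /sys/kernel/debug/mmc0/mmc0:0001/testlist
 *   # echo 1 > /sys/kernel/debug/mmc0/mmc0:0001/test
 *   # cat /sys/kernel/debug/mmc0/mmc0:0001/test
 *
 * Writing a test number starts a run; writing 0 runs all tests.
 */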
static ssize_t mtf_test_write(struct file *file, const char __user *buf,
	size_t count, loff_t *pos)
{
	struct seq_file *sf = (struct seq_file *)file->private_data;
	struct mmc_card *card = (struct mmc_card *)sf->private;
	struct mmc_test_card *test;
	long testcase;
	int ret;

	ret = kstrtol_from_user(buf, count, 10, &testcase);
	if (ret)
		return ret;

	test = kzalloc(sizeof(*test), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	/*
	 * Remove all test cases associated with the given card so only
	 * the data of the new run remains.
	 */
	mmc_test_free_result(card);

	test->card = card;
	test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
#ifdef CONFIG_HIGHMEM
	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
#endif

#ifdef CONFIG_HIGHMEM
	if (test->buffer && test->highmem) {
#else
	if (test->buffer) {
#endif
		mutex_lock(&mmc_test_lock);
		mmc_test_run(test, testcase);
		mutex_unlock(&mmc_test_lock);
	}

#ifdef CONFIG_HIGHMEM
	__free_pages(test->highmem, BUFFER_ORDER);
#endif
	kfree(test->buffer);
	kfree(test);
	return count;
}

static const struct file_operations mmc_test_fops_test = {
	.open		= mtf_test_open,
	.read		= seq_read,
	.write		= mtf_test_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int mtf_testlist_show(struct seq_file *sf, void *data)
{
	int i;

	mutex_lock(&mmc_test_lock);
	seq_puts(sf, "0:\tRun all tests\n");
	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
		seq_printf(sf, "%d:\t%s\n", i + 1, mmc_test_cases[i].name);
	mutex_unlock(&mmc_test_lock);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(mtf_testlist);

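/*
 * Remove the debugfs files created for @card, or for every card when
 * @card is NULL (module unload).
 */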
static void mmc_test_free_dbgfs_file(struct mmc_card *card)
{
	struct mmc_test_dbgfs_file *df, *dfs;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
		if (card && df->card != card)
			continue;
		debugfs_remove(df->file);
		list_del(&df->link);
		kfree(df);
	}

	mutex_unlock(&mmc_test_lock);
}

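/*
 * Create one debugfs file for the card and remember it on
 * mmc_test_file_test so mmc_test_free_dbgfs_file() can remove it again,
 * either when the card goes away or when the module is unloaded.
 */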
static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
	const char *name, umode_t mode, const struct file_operations *fops)
{
	struct dentry *file = NULL;
	struct mmc_test_dbgfs_file *df;

	if (card->debugfs_root)
		file = debugfs_create_file(name, mode, card->debugfs_root,
					   card, fops);

	df = kmalloc(sizeof(*df), GFP_KERNEL);
	if (!df) {
		debugfs_remove(file);
		return -ENOMEM;
	}
	df->card = card;
	df->file = file;
	list_add(&df->link, &mmc_test_file_test);
	return 0;
}

static int mmc_test_register_dbgfs_file(struct mmc_card *card)
{
	int ret;

	mutex_lock(&mmc_test_lock);
	ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
		&mmc_test_fops_test);
	if (ret)
		goto err;
	ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
		&mtf_testlist_fops);
err:
	mutex_unlock(&mmc_test_lock);
	return ret;
}

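/*
 * Bind to any MMC or SD card and expose the debugfs interface. Command
 * queueing is disabled while the card is claimed for testing and is
 * re-enabled on remove.
 */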
static int mmc_test_probe(struct mmc_card *card)
{
	int ret;

	if (!mmc_card_mmc(card) && !mmc_card_sd(card))
		return -ENODEV;

	ret = mmc_test_register_dbgfs_file(card);
	if (ret)
		return ret;

	if (card->ext_csd.cmdq_en) {
		mmc_claim_host(card->host);
		ret = mmc_cmdq_disable(card);
		mmc_release_host(card->host);
		if (ret)
			return ret;
	}

	dev_info(&card->dev, "Card claimed for testing.\n");
	return 0;
}

static void mmc_test_remove(struct mmc_card *card)
{
	if (card->reenable_cmdq) {
		mmc_claim_host(card->host);
		mmc_cmdq_enable(card);
		mmc_release_host(card->host);
	}
	mmc_test_free_result(card);
	mmc_test_free_dbgfs_file(card);
}

static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmc_test",
	},
	.probe		= mmc_test_probe,
	.remove		= mmc_test_remove,
};

static int __init mmc_test_init(void)
{
	return mmc_register_driver(&mmc_driver);
}

static void __exit mmc_test_exit(void)
{
	/* Clear stalled data if a card is still plugged */
	mmc_test_free_result(NULL);
	mmc_test_free_dbgfs_file(NULL);

	mmc_unregister_driver(&mmc_driver);
}

module_init(mmc_test_init);
module_exit(mmc_test_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
MODULE_AUTHOR("Pierre Ossman");