// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA Engine test module
 *
 * Copyright (C) 2007 Atmel Corporation
 * Copyright (C) 2013 Intel Corporation
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/freezer.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/sched/task.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>

static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");

static char test_device[32];
module_param_string(device, test_device, sizeof(test_device),
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");

static unsigned int threads_per_chan = 1;
module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(threads_per_chan,
		"Number of threads to start per channel (default: 1)");

static unsigned int max_channels;
module_param(max_channels, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_channels,
		"Maximum number of channels to use (default: all)");

static unsigned int iterations;
module_param(iterations, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(iterations,
		"Iterations before stopping test (default: infinite)");

static unsigned int dmatest;
module_param(dmatest, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dmatest,
		"dmatest 0-memcpy 1-memset (default: 0)");

static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(xor_sources,
		"Number of xor source buffers (default: 3)");

static unsigned int pq_sources = 3;
module_param(pq_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pq_sources,
		"Number of p+q source buffers (default: 3)");

static int timeout = 3000;
module_param(timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
		 "Pass -1 for infinite timeout");

static bool noverify;
module_param(noverify, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(noverify, "Disable data verification (default: verify)");

static bool norandom;
module_param(norandom, bool, 0644);
MODULE_PARM_DESC(norandom, "Disable random offset setup (default: random)");

static bool verbose;
module_param(verbose, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)");

static int alignment = -1;
module_param(alignment, int, 0644);
MODULE_PARM_DESC(alignment, "Custom data address alignment taken as 2^(alignment) (default: not used (-1))");

static unsigned int transfer_size;
module_param(transfer_size, uint, 0644);
MODULE_PARM_DESC(transfer_size, "Optional custom transfer size in bytes (default: not used (0))");

static bool polled;
module_param(polled, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(polled, "Use polling for completion instead of interrupts");

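/*
 * Illustrative usage, assuming a channel named dma0chan0 exists on the
 * running system (the names and values here are examples, not requirements):
 *
 *	% modprobe dmatest timeout=2000 iterations=1 channel=dma0chan0 run=1
 *
 * or, at runtime, via the module parameters exposed in sysfs:
 *
 *	% echo 2000 > /sys/module/dmatest/parameters/timeout
 *	% echo 1 > /sys/module/dmatest/parameters/iterations
 */
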
/**
 * struct dmatest_params - test parameters.
 * @buf_size:		size of the memcpy test buffer
 * @channel:		bus ID of the channel to test
 * @device:		bus ID of the DMA Engine to test
 * @threads_per_chan:	number of threads to start per channel
 * @max_channels:	maximum number of channels to use
 * @iterations:		iterations before stopping test
 * @xor_sources:	number of xor source buffers
 * @pq_sources:		number of p+q source buffers
 * @timeout:		transfer timeout in msec, -1 for infinite timeout
 * @noverify:		disable data verification
 * @norandom:		disable random offset setup
 * @alignment:		custom data address alignment taken as 2^alignment
 * @transfer_size:	custom transfer size in bytes
 * @polled:		use polling for completion instead of interrupts
 */
struct dmatest_params {
	unsigned int	buf_size;
	char		channel[20];
	char		device[32];
	unsigned int	threads_per_chan;
	unsigned int	max_channels;
	unsigned int	iterations;
	unsigned int	xor_sources;
	unsigned int	pq_sources;
	int		timeout;
	bool		noverify;
	bool		norandom;
	int		alignment;
	unsigned int	transfer_size;
	bool		polled;
};

/**
 * struct dmatest_info - test information.
 * @params:		test parameters
 * @channels:		channels under test
 * @nr_channels:	number of channels under test
 * @lock:		access protection to the fields of this structure
 * @did_init:		module has been initialized completely
 * @last_error:		test has faced configuration issues
 */
static struct dmatest_info {
	/* Test parameters */
	struct dmatest_params	params;

	/* Internal state */
	struct list_head	channels;
	unsigned int		nr_channels;
	int			last_error;
	struct mutex		lock;
	bool			did_init;
} test_info = {
	.channels = LIST_HEAD_INIT(test_info.channels),
	.lock = __MUTEX_INITIALIZER(test_info.lock),
};

static int dmatest_run_set(const char *val, const struct kernel_param *kp);
static int dmatest_run_get(char *val, const struct kernel_param *kp);
static const struct kernel_param_ops run_ops = {
	.set = dmatest_run_set,
	.get = dmatest_run_get,
};
static bool dmatest_run;
module_param_cb(run, &run_ops, &dmatest_run, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(run, "Run the test (default: false)");

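/*
 * With the callbacks above in place, user space starts and stops a test by
 * toggling this parameter; reading it back reports whether a test is still
 * running. For example (illustrative):
 *
 *	% echo 1 > /sys/module/dmatest/parameters/run
 *	% cat /sys/module/dmatest/parameters/run
 */
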
static int dmatest_chan_set(const char *val, const struct kernel_param *kp);
static int dmatest_chan_get(char *val, const struct kernel_param *kp);
static const struct kernel_param_ops multi_chan_ops = {
	.set = dmatest_chan_set,
	.get = dmatest_chan_get,
};

static char test_channel[20];
static struct kparam_string newchan_kps = {
	.string = test_channel,
	.maxlen = sizeof(test_channel),
};
module_param_cb(channel, &multi_chan_ops, &newchan_kps, 0644);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");

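/*
 * Each write to "channel" queues one more channel for the next run, so a
 * multi-channel test can be built up before setting run=1, e.g. (channel
 * names are illustrative):
 *
 *	% echo dma0chan0 > /sys/module/dmatest/parameters/channel
 *	% echo dma0chan1 > /sys/module/dmatest/parameters/channel
 *	% echo 1 > /sys/module/dmatest/parameters/run
 */
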
static int dmatest_test_list_get(char *val, const struct kernel_param *kp);
static const struct kernel_param_ops test_list_ops = {
	.get = dmatest_test_list_get,
};
module_param_cb(test_list, &test_list_ops, NULL, 0444);
MODULE_PARM_DESC(test_list, "Print current test list");

/* Maximum amount of mismatched bytes in buffer to print */
#define MAX_ERROR_COUNT		32

/*
 * Initialization patterns. All bytes in the source buffer have bit 7
 * set, all bytes in the destination buffer have bit 7 cleared.
 *
 * Bit 6 is set for all bytes which are to be copied by the DMA
 * engine. Bit 5 is set for all bytes which are to be overwritten by
 * the DMA engine.
 *
 * The remaining bits are the inverse of a counter which increments by
 * one for each byte address.
 */
#define PATTERN_SRC		0x80
#define PATTERN_DST		0x00
#define PATTERN_COPY		0x40
#define PATTERN_OVERWRITE	0x20
#define PATTERN_COUNT_MASK	0x1f
#define PATTERN_MEMSET_IDX	0x01

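/*
 * Worked example (memcpy case, is_memset == false): the source byte at
 * index 2 of the to-be-copied area is initialized to
 * PATTERN_SRC | PATTERN_COPY | (~2 & PATTERN_COUNT_MASK) =
 * 0x80 | 0x40 | 0x1d = 0xdd, while the destination byte at the same
 * index starts out as PATTERN_DST | PATTERN_OVERWRITE | 0x1d = 0x3d.
 */
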
/* Fixed point arithmetic ops */
#define FIXPT_SHIFT		8
#define FIXPT_MASK		0xFF
#define FIXPT_TO_INT(a)	((a) >> FIXPT_SHIFT)
#define INT_TO_FIXPT(a)	((a) << FIXPT_SHIFT)
#define FIXPT_GET_FRAC(a)	((((a) & FIXPT_MASK) * 100) >> FIXPT_SHIFT)

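/*
 * Worked example: the 8.8 fixed-point value 0x0534 decodes as
 * FIXPT_TO_INT(0x0534) = 5 and FIXPT_GET_FRAC(0x0534) =
 * (0x34 * 100) >> 8 = 20, which the summary line prints as "5.20".
 */
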
/* poor man's completion - we want to use wait_event_freezable() on it */
struct dmatest_done {
	bool			done;
	wait_queue_head_t	*wait;
};

struct dmatest_data {
	u8		**raw;
	u8		**aligned;
	unsigned int	cnt;
	unsigned int	off;
};

struct dmatest_thread {
	struct list_head	node;
	struct dmatest_info	*info;
	struct task_struct	*task;
	struct dma_chan		*chan;
	struct dmatest_data	src;
	struct dmatest_data	dst;
	enum dma_transaction_type type;
	wait_queue_head_t	done_wait;
	struct dmatest_done	test_done;
	bool			done;
	bool			pending;
};

struct dmatest_chan {
	struct list_head	node;
	struct dma_chan		*chan;
	struct list_head	threads;
};

static DECLARE_WAIT_QUEUE_HEAD(thread_wait);
static bool wait;

static bool is_threaded_test_run(struct dmatest_info *info)
{
	struct dmatest_chan *dtc;

	list_for_each_entry(dtc, &info->channels, node) {
		struct dmatest_thread *thread;

		list_for_each_entry(thread, &dtc->threads, node) {
			if (!thread->done && !thread->pending)
				return true;
		}
	}

	return false;
}

static bool is_threaded_test_pending(struct dmatest_info *info)
{
	struct dmatest_chan *dtc;

	list_for_each_entry(dtc, &info->channels, node) {
		struct dmatest_thread *thread;

		list_for_each_entry(thread, &dtc->threads, node) {
			if (thread->pending)
				return true;
		}
	}

	return false;
}

static int dmatest_wait_get(char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;
	struct dmatest_params *params = &info->params;

	if (params->iterations)
		wait_event(thread_wait, !is_threaded_test_run(info));
	wait = true;
	return param_get_bool(val, kp);
}

static const struct kernel_param_ops wait_ops = {
	.get = dmatest_wait_get,
	.set = param_set_bool,
};
module_param_cb(wait, &wait_ops, &wait, S_IRUGO);
MODULE_PARM_DESC(wait, "Wait for tests to complete (default: false)");

static bool dmatest_match_channel(struct dmatest_params *params,
		struct dma_chan *chan)
{
	if (params->channel[0] == '\0')
		return true;
	return strcmp(dma_chan_name(chan), params->channel) == 0;
}

static bool dmatest_match_device(struct dmatest_params *params,
		struct dma_device *device)
{
	if (params->device[0] == '\0')
		return true;
	return strcmp(dev_name(device->dev), params->device) == 0;
}

static unsigned long dmatest_random(void)
{
	unsigned long buf;

	prandom_bytes(&buf, sizeof(buf));
	return buf;
}

static inline u8 gen_inv_idx(u8 index, bool is_memset)
{
	u8 val = is_memset ? PATTERN_MEMSET_IDX : index;

	return ~val & PATTERN_COUNT_MASK;
}

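/*
 * E.g. gen_inv_idx(0, false) == 0x1f and gen_inv_idx(2, false) == 0x1d;
 * with is_memset the index is pinned to PATTERN_MEMSET_IDX, so every byte
 * carries the same inverted index (0x1e).
 */
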
static inline u8 gen_src_value(u8 index, bool is_memset)
{
	return PATTERN_SRC | gen_inv_idx(index, is_memset);
}

static inline u8 gen_dst_value(u8 index, bool is_memset)
{
	return PATTERN_DST | gen_inv_idx(index, is_memset);
}

static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
		unsigned int buf_size, bool is_memset)
{
	unsigned int i;
	u8 *buf;

	for (; (buf = *bufs); bufs++) {
		for (i = 0; i < start; i++)
			buf[i] = gen_src_value(i, is_memset);
		for ( ; i < start + len; i++)
			buf[i] = gen_src_value(i, is_memset) | PATTERN_COPY;
		for ( ; i < buf_size; i++)
			buf[i] = gen_src_value(i, is_memset);
	}
}

static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
		unsigned int buf_size, bool is_memset)
{
	unsigned int i;
	u8 *buf;

	for (; (buf = *bufs); bufs++) {
		for (i = 0; i < start; i++)
			buf[i] = gen_dst_value(i, is_memset);
		for ( ; i < start + len; i++)
			buf[i] = gen_dst_value(i, is_memset) |
						PATTERN_OVERWRITE;
		for ( ; i < buf_size; i++)
			buf[i] = gen_dst_value(i, is_memset);
	}
}

static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
		unsigned int counter, bool is_srcbuf, bool is_memset)
{
	u8		diff = actual ^ pattern;
	u8		expected = pattern | gen_inv_idx(counter, is_memset);
	const char	*thread_name = current->comm;

	if (is_srcbuf)
		pr_warn("%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else if ((pattern & PATTERN_COPY)
			&& (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
		pr_warn("%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else if (diff & PATTERN_SRC)
		pr_warn("%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else
		pr_warn("%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
}

static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
		unsigned int end, unsigned int counter, u8 pattern,
		bool is_srcbuf, bool is_memset)
{
	unsigned int i;
	unsigned int error_count = 0;
	u8 actual;
	u8 expected;
	u8 *buf;
	unsigned int counter_orig = counter;

	for (; (buf = *bufs); bufs++) {
		counter = counter_orig;
		for (i = start; i < end; i++) {
			actual = buf[i];
			expected = pattern | gen_inv_idx(counter, is_memset);
			if (actual != expected) {
				if (error_count < MAX_ERROR_COUNT)
					dmatest_mismatch(actual, pattern, i,
							 counter, is_srcbuf,
							 is_memset);
				error_count++;
			}
			counter++;
		}
	}

	if (error_count > MAX_ERROR_COUNT)
		pr_warn("%s: %u errors suppressed\n",
			current->comm, error_count - MAX_ERROR_COUNT);

	return error_count;
}

static void dmatest_callback(void *arg)
{
	struct dmatest_done *done = arg;
	struct dmatest_thread *thread =
		container_of(done, struct dmatest_thread, test_done);
	if (!thread->done) {
		done->done = true;
		wake_up_all(done->wait);
	} else {
		/*
		 * If thread->done, it means that this callback occurred
		 * after the parent thread has cleaned up. This can
		 * happen in the case that the driver doesn't implement
		 * the terminate_all() functionality and a dma operation
		 * did not occur within the timeout period
		 */
		WARN(1, "dmatest: Kernel memory may be corrupted!!\n");
	}
}

static unsigned int min_odd(unsigned int x, unsigned int y)
{
	unsigned int val = min(x, y);

	return val % 2 ? val : val - 1;
}

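/*
 * E.g. min_odd(4, 8) == 3 and min_odd(3, 8) == 3; callers use this to
 * keep the xor/pq source count odd.
 */
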
static void result(const char *err, unsigned int n, unsigned int src_off,
		   unsigned int dst_off, unsigned int len, unsigned long data)
{
	pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
		current->comm, n, err, src_off, dst_off, len, data);
}

static void dbg_result(const char *err, unsigned int n, unsigned int src_off,
		       unsigned int dst_off, unsigned int len,
		       unsigned long data)
{
	pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
		 current->comm, n, err, src_off, dst_off, len, data);
}

#define verbose_result(err, n, src_off, dst_off, len, data) ({	\
	if (verbose)						\
		result(err, n, src_off, dst_off, len, data);	\
	else							\
		dbg_result(err, n, src_off, dst_off, len, data);\
})

static unsigned long long dmatest_persec(s64 runtime, unsigned int val)
{
	unsigned long long per_sec = 1000000;

	if (runtime <= 0)
		return 0;

	/* drop precision until runtime is 32-bits */
	while (runtime > UINT_MAX) {
		runtime >>= 1;
		per_sec <<= 1;
	}

	per_sec *= val;
	per_sec = INT_TO_FIXPT(per_sec);
	do_div(per_sec, runtime);

	return per_sec;
}

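/*
 * E.g. 1500 transfers over a 2000000 us runtime give
 * INT_TO_FIXPT(1500 * 1000000) / 2000000 = 750 << FIXPT_SHIFT,
 * i.e. 750.00 operations per second in 8.8 fixed point.
 */
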
static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
{
	return FIXPT_TO_INT(dmatest_persec(runtime, len >> 10));
}

static void __dmatest_free_test_data(struct dmatest_data *d, unsigned int cnt)
{
	unsigned int i;

	for (i = 0; i < cnt; i++)
		kfree(d->raw[i]);

	kfree(d->aligned);
	kfree(d->raw);
}

static void dmatest_free_test_data(struct dmatest_data *d)
{
	__dmatest_free_test_data(d, d->cnt);
}

static int dmatest_alloc_test_data(struct dmatest_data *d,
		unsigned int buf_size, u8 align)
{
	unsigned int i = 0;

	d->raw = kcalloc(d->cnt + 1, sizeof(u8 *), GFP_KERNEL);
	if (!d->raw)
		return -ENOMEM;

	d->aligned = kcalloc(d->cnt + 1, sizeof(u8 *), GFP_KERNEL);
	if (!d->aligned)
		goto err;

	for (i = 0; i < d->cnt; i++) {
		d->raw[i] = kmalloc(buf_size + align, GFP_KERNEL);
		if (!d->raw[i])
			goto err;

		/* align to alignment restriction */
		if (align)
			d->aligned[i] = PTR_ALIGN(d->raw[i], align);
		else
			d->aligned[i] = d->raw[i];
	}

	return 0;
err:
	__dmatest_free_test_data(d, i);
	return -ENOMEM;
}

/*
 * This function repeatedly tests DMA transfers of various lengths and
 * offsets for a given operation type until it is told to exit by
 * kthread_stop(). There may be multiple threads running this function
 * in parallel for a single channel, and there may be multiple channels
 * being tested in parallel.
 *
 * Before each test, the source and destination buffers are initialized
 * with a known pattern. This pattern is different depending on
 * whether it's in an area which is supposed to be copied or
 * overwritten, and different in the source and destination buffers.
 * So if the DMA engine doesn't copy exactly what we tell it to copy,
 * we'll notice.
 */
static int dmatest_func(void *data)
{
	struct dmatest_thread	*thread = data;
	struct dmatest_done	*done = &thread->test_done;
	struct dmatest_info	*info;
	struct dmatest_params	*params;
	struct dma_chan		*chan;
	struct dma_device	*dev;
	unsigned int		error_count;
	unsigned int		failed_tests = 0;
	unsigned int		total_tests = 0;
	dma_cookie_t		cookie;
	enum dma_status		status;
	enum dma_ctrl_flags	flags;
	u8			*pq_coefs = NULL;
	int			ret;
	unsigned int		buf_size;
	struct dmatest_data	*src;
	struct dmatest_data	*dst;
	int			i;
	ktime_t			ktime, start, diff;
	ktime_t			filltime = 0;
	ktime_t			comparetime = 0;
	s64			runtime = 0;
	unsigned long long	total_len = 0;
	unsigned long long	iops = 0;
	u8			align;
	bool			is_memset = false;
	dma_addr_t		*srcs;
	dma_addr_t		*dma_pq;

	set_freezable();

	ret = -ENOMEM;

	smp_rmb();
	thread->pending = false;
	info = thread->info;
	params = &info->params;
	chan = thread->chan;
	dev = chan->device;
	src = &thread->src;
	dst = &thread->dst;
	if (thread->type == DMA_MEMCPY) {
		align = params->alignment < 0 ? dev->copy_align :
						params->alignment;
		src->cnt = dst->cnt = 1;
	} else if (thread->type == DMA_MEMSET) {
		align = params->alignment < 0 ? dev->fill_align :
						params->alignment;
		src->cnt = dst->cnt = 1;
		is_memset = true;
	} else if (thread->type == DMA_XOR) {
		/* force odd to ensure dst = src */
		src->cnt = min_odd(params->xor_sources | 1, dev->max_xor);
		dst->cnt = 1;
		align = params->alignment < 0 ? dev->xor_align :
						params->alignment;
	} else if (thread->type == DMA_PQ) {
		/* force odd to ensure dst = src */
		src->cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
		dst->cnt = 2;
		align = params->alignment < 0 ? dev->pq_align :
						params->alignment;

		pq_coefs = kmalloc(params->pq_sources + 1, GFP_KERNEL);
		if (!pq_coefs)
			goto err_thread_type;

		for (i = 0; i < src->cnt; i++)
			pq_coefs[i] = 1;
	} else
		goto err_thread_type;

	/* Check if buffer count fits into map count variable (u8) */
	if ((src->cnt + dst->cnt) >= 255) {
		pr_err("too many buffers (%d of 255 supported)\n",
		       src->cnt + dst->cnt);
		goto err_free_coefs;
	}

	buf_size = params->buf_size;
	if (1 << align > buf_size) {
		pr_err("%u-byte buffer too small for %d-byte alignment\n",
		       buf_size, 1 << align);
		goto err_free_coefs;
	}

	if (dmatest_alloc_test_data(src, buf_size, align) < 0)
		goto err_free_coefs;

	if (dmatest_alloc_test_data(dst, buf_size, align) < 0)
		goto err_src;

	set_user_nice(current, 10);

	srcs = kcalloc(src->cnt, sizeof(dma_addr_t), GFP_KERNEL);
	if (!srcs)
		goto err_dst;

	dma_pq = kcalloc(dst->cnt, sizeof(dma_addr_t), GFP_KERNEL);
	if (!dma_pq)
		goto err_srcs_array;

	/*
	 * src and dst buffers are freed by ourselves below
	 */
	if (params->polled)
		flags = DMA_CTRL_ACK;
	else
		flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;

	ktime = ktime_get();
	while (!(kthread_should_stop() ||
	       (params->iterations && total_tests >= params->iterations))) {
		struct dma_async_tx_descriptor *tx = NULL;
		struct dmaengine_unmap_data *um;
		dma_addr_t *dsts;
		unsigned int len;

		total_tests++;

		if (params->transfer_size) {
			if (params->transfer_size >= buf_size) {
				pr_err("%u-byte transfer size must be lower than %u-byte buffer size\n",
				       params->transfer_size, buf_size);
				break;
			}
			len = params->transfer_size;
		} else if (params->norandom) {
			len = buf_size;
		} else {
			len = dmatest_random() % buf_size + 1;
		}

		/* Do not alter transfer size explicitly defined by user */
		if (!params->transfer_size) {
			len = (len >> align) << align;
			if (!len)
				len = 1 << align;
		}
		total_len += len;

		if (params->norandom) {
			src->off = 0;
			dst->off = 0;
		} else {
			src->off = dmatest_random() % (buf_size - len + 1);
			dst->off = dmatest_random() % (buf_size - len + 1);

			src->off = (src->off >> align) << align;
			dst->off = (dst->off >> align) << align;
		}

		if (!params->noverify) {
			start = ktime_get();
			dmatest_init_srcs(src->aligned, src->off, len,
					  buf_size, is_memset);
			dmatest_init_dsts(dst->aligned, dst->off, len,
					  buf_size, is_memset);

			diff = ktime_sub(ktime_get(), start);
			filltime = ktime_add(filltime, diff);
		}

		um = dmaengine_get_unmap_data(dev->dev, src->cnt + dst->cnt,
					      GFP_KERNEL);
		if (!um) {
			failed_tests++;
			result("unmap data NULL", total_tests,
			       src->off, dst->off, len, ret);
			continue;
		}

		um->len = buf_size;
		for (i = 0; i < src->cnt; i++) {
			void *buf = src->aligned[i];
			struct page *pg = virt_to_page(buf);
			unsigned long pg_off = offset_in_page(buf);

			um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
						   um->len, DMA_TO_DEVICE);
			srcs[i] = um->addr[i] + src->off;
			ret = dma_mapping_error(dev->dev, um->addr[i]);
			if (ret) {
				result("src mapping error", total_tests,
				       src->off, dst->off, len, ret);
				goto error_unmap_continue;
			}
			um->to_cnt++;
		}
		/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
		dsts = &um->addr[src->cnt];
		for (i = 0; i < dst->cnt; i++) {
			void *buf = dst->aligned[i];
			struct page *pg = virt_to_page(buf);
			unsigned long pg_off = offset_in_page(buf);

			dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
					       DMA_BIDIRECTIONAL);
			ret = dma_mapping_error(dev->dev, dsts[i]);
			if (ret) {
				result("dst mapping error", total_tests,
				       src->off, dst->off, len, ret);
				goto error_unmap_continue;
			}
			um->bidi_cnt++;
		}

		if (thread->type == DMA_MEMCPY)
			tx = dev->device_prep_dma_memcpy(chan,
							 dsts[0] + dst->off,
							 srcs[0], len, flags);
		else if (thread->type == DMA_MEMSET)
			tx = dev->device_prep_dma_memset(chan,
						dsts[0] + dst->off,
						*(src->aligned[0] + src->off),
						len, flags);
		else if (thread->type == DMA_XOR)
			tx = dev->device_prep_dma_xor(chan,
						      dsts[0] + dst->off,
						      srcs, src->cnt,
						      len, flags);
		else if (thread->type == DMA_PQ) {
			for (i = 0; i < dst->cnt; i++)
				dma_pq[i] = dsts[i] + dst->off;
			tx = dev->device_prep_dma_pq(chan, dma_pq, srcs,
						     src->cnt, pq_coefs,
						     len, flags);
		}

		if (!tx) {
			result("prep error", total_tests, src->off,
			       dst->off, len, ret);
			msleep(100);
			goto error_unmap_continue;
		}

		done->done = false;
		if (!params->polled) {
			tx->callback = dmatest_callback;
			tx->callback_param = done;
		}
		cookie = tx->tx_submit(tx);

		if (dma_submit_error(cookie)) {
			result("submit error", total_tests, src->off,
			       dst->off, len, ret);
			msleep(100);
			goto error_unmap_continue;
		}

		if (params->polled) {
			status = dma_sync_wait(chan, cookie);
			dmaengine_terminate_sync(chan);
			if (status == DMA_COMPLETE)
				done->done = true;
		} else {
			dma_async_issue_pending(chan);

			wait_event_freezable_timeout(thread->done_wait,
					done->done,
					msecs_to_jiffies(params->timeout));

			status = dma_async_is_tx_complete(chan, cookie, NULL,
							  NULL);
		}

		if (!done->done) {
			result("test timed out", total_tests, src->off, dst->off,
			       len, 0);
			goto error_unmap_continue;
		} else if (status != DMA_COMPLETE &&
			   !(dma_has_cap(DMA_COMPLETION_NO_ORDER,
					 dev->cap_mask) &&
			     status == DMA_OUT_OF_ORDER)) {
			result(status == DMA_ERROR ?
			       "completion error status" :
			       "completion busy status", total_tests, src->off,
			       dst->off, len, ret);
			goto error_unmap_continue;
		}

		dmaengine_unmap_put(um);

		if (params->noverify) {
			verbose_result("test passed", total_tests, src->off,
				       dst->off, len, 0);
			continue;
		}

		start = ktime_get();
		pr_debug("%s: verifying source buffer...\n", current->comm);
		error_count = dmatest_verify(src->aligned, 0, src->off,
				0, PATTERN_SRC, true, is_memset);
		error_count += dmatest_verify(src->aligned, src->off,
				src->off + len, src->off,
				PATTERN_SRC | PATTERN_COPY, true, is_memset);
		error_count += dmatest_verify(src->aligned, src->off + len,
				buf_size, src->off + len,
				PATTERN_SRC, true, is_memset);

		pr_debug("%s: verifying dest buffer...\n", current->comm);
		error_count += dmatest_verify(dst->aligned, 0, dst->off,
				0, PATTERN_DST, false, is_memset);

		error_count += dmatest_verify(dst->aligned, dst->off,
				dst->off + len, src->off,
				PATTERN_SRC | PATTERN_COPY, false, is_memset);

		error_count += dmatest_verify(dst->aligned, dst->off + len,
				buf_size, dst->off + len,
				PATTERN_DST, false, is_memset);

		diff = ktime_sub(ktime_get(), start);
		comparetime = ktime_add(comparetime, diff);

		if (error_count) {
			result("data error", total_tests, src->off, dst->off,
			       len, error_count);
			failed_tests++;
		} else {
			verbose_result("test passed", total_tests, src->off,
				       dst->off, len, 0);
		}

		continue;

error_unmap_continue:
		dmaengine_unmap_put(um);
		failed_tests++;
	}
	ktime = ktime_sub(ktime_get(), ktime);
	ktime = ktime_sub(ktime, comparetime);
	ktime = ktime_sub(ktime, filltime);
	runtime = ktime_to_us(ktime);

	ret = 0;
	kfree(dma_pq);
err_srcs_array:
	kfree(srcs);
err_dst:
	dmatest_free_test_data(dst);
err_src:
	dmatest_free_test_data(src);
err_free_coefs:
	kfree(pq_coefs);
err_thread_type:
	iops = dmatest_persec(runtime, total_tests);
	pr_info("%s: summary %u tests, %u failures %llu.%02llu iops %llu KB/s (%d)\n",
		current->comm, total_tests, failed_tests,
		FIXPT_TO_INT(iops), FIXPT_GET_FRAC(iops),
		dmatest_KBs(runtime, total_len), ret);

	/* terminate all transfers on specified channels */
	if (ret || failed_tests)
		dmaengine_terminate_sync(chan);

	thread->done = true;
	wake_up(&thread_wait);

	return ret;
}

static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
{
	struct dmatest_thread	*thread;
	struct dmatest_thread	*_thread;
	int			ret;

	list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
		ret = kthread_stop(thread->task);
		pr_debug("thread %s exited with status %d\n",
			 thread->task->comm, ret);
		list_del(&thread->node);
		put_task_struct(thread->task);
		kfree(thread);
	}

	/* terminate all transfers on specified channels */
	dmaengine_terminate_sync(dtc->chan);

	kfree(dtc);
}

static int dmatest_add_threads(struct dmatest_info *info,
		struct dmatest_chan *dtc, enum dma_transaction_type type)
{
	struct dmatest_params *params = &info->params;
	struct dmatest_thread *thread;
	struct dma_chan *chan = dtc->chan;
	char *op;
	unsigned int i;

	if (type == DMA_MEMCPY)
		op = "copy";
	else if (type == DMA_MEMSET)
		op = "set";
	else if (type == DMA_XOR)
		op = "xor";
	else if (type == DMA_PQ)
		op = "pq";
	else
		return -EINVAL;

	for (i = 0; i < params->threads_per_chan; i++) {
		thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
		if (!thread) {
			pr_warn("No memory for %s-%s%u\n",
				dma_chan_name(chan), op, i);
			break;
		}
		thread->info = info;
		thread->chan = dtc->chan;
		thread->type = type;
		thread->test_done.wait = &thread->done_wait;
		init_waitqueue_head(&thread->done_wait);
		smp_wmb();
		thread->task = kthread_create(dmatest_func, thread, "%s-%s%u",
				dma_chan_name(chan), op, i);
		if (IS_ERR(thread->task)) {
			pr_warn("Failed to create thread %s-%s%u\n",
				dma_chan_name(chan), op, i);
			kfree(thread);
			break;
		}

		/* srcbuf and dstbuf are allocated by the thread itself */
		get_task_struct(thread->task);
		list_add_tail(&thread->node, &dtc->threads);
		thread->pending = true;
	}

	return i;
}

static int dmatest_add_channel(struct dmatest_info *info,
		struct dma_chan *chan)
{
	struct dmatest_chan	*dtc;
	struct dma_device	*dma_dev = chan->device;
	unsigned int		thread_count = 0;
	int cnt;

	dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
	if (!dtc) {
		pr_warn("No memory for %s\n", dma_chan_name(chan));
		return -ENOMEM;
	}

	dtc->chan = chan;
	INIT_LIST_HEAD(&dtc->threads);

	if (dma_has_cap(DMA_COMPLETION_NO_ORDER, dma_dev->cap_mask) &&
	    info->params.polled) {
		info->params.polled = false;
		pr_warn("DMA_COMPLETION_NO_ORDER, polled disabled\n");
	}

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		if (dmatest == 0) {
			cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
			thread_count += cnt > 0 ? cnt : 0;
		}
	}

	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
		if (dmatest == 1) {
			cnt = dmatest_add_threads(info, dtc, DMA_MEMSET);
			thread_count += cnt > 0 ? cnt : 0;
		}
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_XOR);
		thread_count += cnt > 0 ? cnt : 0;
	}

	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_PQ);
		thread_count += cnt > 0 ? cnt : 0;
	}

	pr_info("Added %u threads using %s\n",
		thread_count, dma_chan_name(chan));

	list_add_tail(&dtc->node, &info->channels);
	info->nr_channels++;

	return 0;
}

static bool filter(struct dma_chan *chan, void *param)
{
	struct dmatest_params *params = param;

	if (!dmatest_match_channel(params, chan) ||
	    !dmatest_match_device(params, chan->device))
		return false;
	else
		return true;
}

static void request_channels(struct dmatest_info *info,
			     enum dma_transaction_type type)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(type, mask);
	for (;;) {
		struct dmatest_params *params = &info->params;
		struct dma_chan *chan;

		chan = dma_request_channel(mask, filter, params);
		if (chan) {
			if (dmatest_add_channel(info, chan)) {
				dma_release_channel(chan);
				break; /* add_channel failed, punt */
			}
		} else
			break; /* no more channels available */
		if (params->max_channels &&
		    info->nr_channels >= params->max_channels)
			break; /* we have all we need */
	}
}

static void add_threaded_test(struct dmatest_info *info)
{
	struct dmatest_params *params = &info->params;

	/* Copy test parameters */
	params->buf_size = test_buf_size;
	strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
	strlcpy(params->device, strim(test_device), sizeof(params->device));
	params->threads_per_chan = threads_per_chan;
	params->max_channels = max_channels;
	params->iterations = iterations;
	params->xor_sources = xor_sources;
	params->pq_sources = pq_sources;
	params->timeout = timeout;
	params->noverify = noverify;
	params->norandom = norandom;
	params->alignment = alignment;
	params->transfer_size = transfer_size;
	params->polled = polled;

	request_channels(info, DMA_MEMCPY);
	request_channels(info, DMA_MEMSET);
	request_channels(info, DMA_XOR);
	request_channels(info, DMA_PQ);
}

static void run_pending_tests(struct dmatest_info *info)
{
	struct dmatest_chan *dtc;
	unsigned int thread_count = 0;

	list_for_each_entry(dtc, &info->channels, node) {
		struct dmatest_thread *thread;

		thread_count = 0;
		list_for_each_entry(thread, &dtc->threads, node) {
			wake_up_process(thread->task);
			thread_count++;
		}
		pr_info("Started %u threads using %s\n",
			thread_count, dma_chan_name(dtc->chan));
	}
}

static void stop_threaded_test(struct dmatest_info *info)
{
	struct dmatest_chan *dtc, *_dtc;
	struct dma_chan *chan;

	list_for_each_entry_safe(dtc, _dtc, &info->channels, node) {
		list_del(&dtc->node);
		chan = dtc->chan;
		dmatest_cleanup_channel(dtc);
		pr_debug("dropped channel %s\n", dma_chan_name(chan));
		dma_release_channel(chan);
	}

	info->nr_channels = 0;
}

static void start_threaded_tests(struct dmatest_info *info)
{
	/* we might be called early to set run=, defer running until all
	 * parameters have been evaluated
	 */
	if (!info->did_init)
		return;

	run_pending_tests(info);
}

static int dmatest_run_get(char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;

	mutex_lock(&info->lock);
	if (is_threaded_test_run(info)) {
		dmatest_run = true;
	} else {
		if (!is_threaded_test_pending(info))
			stop_threaded_test(info);
		dmatest_run = false;
	}
	mutex_unlock(&info->lock);

	return param_get_bool(val, kp);
}

static int dmatest_run_set(const char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;
	int ret;

	mutex_lock(&info->lock);
	ret = param_set_bool(val, kp);
	if (ret) {
		mutex_unlock(&info->lock);
		return ret;
	} else if (dmatest_run) {
		if (!is_threaded_test_pending(info)) {
			/*
			 * We have nothing to run. This can be due to:
			 */
			ret = info->last_error;
			if (ret) {
				/* 1) Misconfiguration */
				pr_err("Channel misconfigured, can't continue\n");
				mutex_unlock(&info->lock);
				return ret;
			} else {
				/* 2) We rely on defaults */
				pr_info("No channels configured, continue with any\n");
				if (!is_threaded_test_run(info))
					stop_threaded_test(info);
				add_threaded_test(info);
			}
		}
		start_threaded_tests(info);
	} else {
		stop_threaded_test(info);
	}

	mutex_unlock(&info->lock);

	return ret;
}

static int dmatest_chan_set(const char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;
	struct dmatest_chan *dtc;
	char chan_reset_val[20];
	int ret = 0;

	mutex_lock(&info->lock);
	ret = param_set_copystring(val, kp);
	if (ret) {
		mutex_unlock(&info->lock);
		return ret;
	}
	/* Clear any previously run threads */
	if (!is_threaded_test_run(info) && !is_threaded_test_pending(info))
		stop_threaded_test(info);
	/* Reject channels that are already registered */
	if (is_threaded_test_pending(info)) {
		list_for_each_entry(dtc, &info->channels, node) {
			if (strcmp(dma_chan_name(dtc->chan),
				   strim(test_channel)) == 0) {
				dtc = list_last_entry(&info->channels,
						      struct dmatest_chan,
						      node);
				strlcpy(chan_reset_val,
					dma_chan_name(dtc->chan),
					sizeof(chan_reset_val));
				ret = -EBUSY;
				goto add_chan_err;
			}
		}
	}

	add_threaded_test(info);

	/* Check if channel was added successfully */
	dtc = list_last_entry(&info->channels, struct dmatest_chan, node);

	if (dtc->chan) {
		/*
		 * If the new channel was not successfully added, revert the
		 * "test_channel" string to the name of the last successfully
		 * added channel, except when the user issues an empty string
		 * to the channel parameter.
		 */
		if ((strcmp(dma_chan_name(dtc->chan), strim(test_channel)) != 0)
		    && (strcmp("", strim(test_channel)) != 0)) {
			ret = -EINVAL;
			strlcpy(chan_reset_val, dma_chan_name(dtc->chan),
				sizeof(chan_reset_val));
			goto add_chan_err;
		}
	} else {
		/* Clear test_channel if no channels were added successfully */
		strlcpy(chan_reset_val, "", sizeof(chan_reset_val));
		ret = -EBUSY;
		goto add_chan_err;
	}

	info->last_error = ret;
	mutex_unlock(&info->lock);

	return ret;

add_chan_err:
	param_set_copystring(chan_reset_val, kp);
	info->last_error = ret;
	mutex_unlock(&info->lock);

	return ret;
}

static int dmatest_chan_get(char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;

	mutex_lock(&info->lock);
	if (!is_threaded_test_run(info) && !is_threaded_test_pending(info)) {
		stop_threaded_test(info);
		strlcpy(test_channel, "", sizeof(test_channel));
	}
	mutex_unlock(&info->lock);

	return param_get_string(val, kp);
}

static int dmatest_test_list_get(char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;
	struct dmatest_chan *dtc;
	unsigned int thread_count = 0;

	list_for_each_entry(dtc, &info->channels, node) {
		struct dmatest_thread *thread;

		thread_count = 0;
		list_for_each_entry(thread, &dtc->threads, node)
			thread_count++;
		pr_info("%u threads using %s\n",
			thread_count, dma_chan_name(dtc->chan));
	}

	return 0;
}

static int __init dmatest_init(void)
{
	struct dmatest_info *info = &test_info;
	struct dmatest_params *params = &info->params;

	if (dmatest_run) {
		mutex_lock(&info->lock);
		add_threaded_test(info);
		run_pending_tests(info);
		mutex_unlock(&info->lock);
	}

	if (params->iterations && wait)
		wait_event(thread_wait, !is_threaded_test_run(info));

	/* module parameters are stable, inittime tests are started,
	 * let userspace take over 'run' control
	 */
	info->did_init = true;

	return 0;
}
/* when compiled-in wait for drivers to load first */
late_initcall(dmatest_init);

static void __exit dmatest_exit(void)
{
	struct dmatest_info *info = &test_info;

	mutex_lock(&info->lock);
	stop_threaded_test(info);
	mutex_unlock(&info->lock);
}
module_exit(dmatest_exit);

MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL v2");