/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#define pr_fmt(fmt)	"DMA-API: " fmt

#include <linux/sched/task_stack.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/sched/task.h>
#include <linux/stacktrace.h>
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/sections.h>
#define HASH_SIZE       1024ULL
#define HASH_FN_SHIFT   13
#define HASH_FN_MASK    (HASH_SIZE - 1)

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
/* If the pool runs out, add this many new entries at once */
#define DMA_DEBUG_DYNAMIC_ENTRIES (PAGE_SIZE / sizeof(struct dma_debug_entry))
enum {
	dma_debug_single,
	dma_debug_page,
	dma_debug_sg,
	dma_debug_coherent,
	dma_debug_resource,
};

enum map_err_types {
	MAP_ERR_CHECK_NOT_APPLICABLE,
	MAP_ERR_NOT_CHECKED,
	MAP_ERR_CHECKED,
};
#define DMA_DEBUG_STACKTRACE_ENTRIES 5
/**
 * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
 * @list: node on pre-allocated free_entries list
 * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
 * @type: single, page, sg, coherent
 * @pfn: page frame of the start address
 * @offset: offset of mapping relative to pfn
 * @size: length of the mapping
 * @direction: enum dma_data_direction
 * @dev_addr: dma address returned by the mapping function
 * @sg_call_ents: 'nents' from dma_map_sg
 * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
 * @map_err_type: track whether dma_mapping_error() was checked
 * @stacktrace: support backtraces when a violation is detected
 */
struct dma_debug_entry {
	struct list_head	list;
	struct device		*dev;
	int			type;
	unsigned long		pfn;
	size_t			offset;
	u64			dev_addr;
	u64			size;
	int			direction;
	int			sg_call_ents;
	int			sg_mapped_ents;
	enum map_err_types	map_err_type;
#ifdef CONFIG_STACKTRACE
	struct stack_trace	stacktrace;
	unsigned long		st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
};
typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);

struct hash_bucket {
	struct list_head list;
	spinlock_t lock;
} ____cacheline_aligned_in_smp;
/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Early initialization disable flag, set at the end of dma_debug_init */
static bool dma_debug_initialized __read_mostly;
static inline bool dma_debug_disabled(void)
{
	return global_disable || !dma_debug_initialized;
}
/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;

/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;
static u32 nr_total_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
/* debugfs dentry's for the stuff above */
static struct dentry *dma_debug_dent        __read_mostly;
static struct dentry *global_disable_dent   __read_mostly;
static struct dentry *error_count_dent      __read_mostly;
static struct dentry *show_all_errors_dent  __read_mostly;
static struct dentry *show_num_errors_dent  __read_mostly;
static struct dentry *num_free_entries_dent __read_mostly;
static struct dentry *min_free_entries_dent __read_mostly;
static struct dentry *nr_total_entries_dent __read_mostly;
static struct dentry *filter_dent           __read_mostly;

/* per-driver filter related state */

#define NAME_MAX_LEN	64

static char                  current_driver_name[NAME_MAX_LEN] __read_mostly;
static struct device_driver *current_driver                    __read_mostly;

static DEFINE_RWLOCK(driver_name_lock);
static const char *const maperr2str[] = {
	[MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
	[MAP_ERR_NOT_CHECKED] = "dma map error not checked",
	[MAP_ERR_CHECKED] = "dma map error checked",
};

static const char *type2name[5] = { "single", "page",
				    "scatter-gather", "coherent",
				    "resource" };

static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
				   "DMA_FROM_DEVICE", "DMA_NONE" };
/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writeable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    worst case this will result in one warning more in the
 *                    system log than the user configured. This variable is
 *                    writeable via debugfs.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
	if (entry) {
		pr_warning("Mapped at:\n");
		print_stack_trace(&entry->stacktrace, 0);
	}
#endif
}
static bool driver_filter(struct device *dev)
{
	struct device_driver *drv;
	unsigned long flags;
	bool ret;

	/* driver filter off */
	if (likely(!current_driver_name[0]))
		return true;

	/* driver filter on and initialized */
	if (current_driver && dev && dev->driver == current_driver)
		return true;

	/* driver filter on, but we can't filter on a NULL device... */
	if (!dev)
		return false;

	if (current_driver || !current_driver_name[0])
		return false;

	/* driver filter on but not yet initialized */
	drv = dev->driver;
	if (!drv)
		return false;

	/* lock to protect against change of current_driver_name */
	read_lock_irqsave(&driver_name_lock, flags);

	ret = false;
	if (drv->name &&
	    strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
		current_driver = drv;
		ret = true;
	}

	read_unlock_irqrestore(&driver_name_lock, flags);

	return ret;
}
#define err_printk(dev, entry, format, arg...) do {			\
		error_count += 1;					\
		if (driver_filter(dev) &&				\
		    (show_all_errors || show_num_errors > 0)) {		\
			WARN(1, pr_fmt("%s %s: ") format,		\
			     dev ? dev_driver_string(dev) : "NULL",	\
			     dev ? dev_name(dev) : "NULL", ## arg);	\
			dump_entry_trace(entry);			\
		}							\
		if (!show_all_errors && show_num_errors > 0)		\
			show_num_errors -= 1;				\
	} while (0);
/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
	/*
	 * Hash function is based on the dma address.
	 * We use bits 13-22 here as the index into the hash.
	 */
	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}
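
/*
 * Worked example (illustrative figures, not taken from the original source):
 * with HASH_FN_SHIFT == 13 and HASH_SIZE == 1024, a dev_addr of 0x12345678
 * lands in bucket (0x12345678 >> 13) & 0x3ff == 0x1a2, i.e. bucket 418.
 * All mappings whose bus addresses share bits 13-22 therefore share a
 * bucket and its lock.
 */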
/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
					   unsigned long *flags)
	__acquires(&dma_entry_hash[idx].lock)
{
	int idx = hash_fn(entry);
	unsigned long __flags;

	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
	*flags = __flags;
	return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
			    unsigned long *flags)
	__releases(&bucket->lock)
{
	unsigned long __flags = *flags;

	spin_unlock_irqrestore(&bucket->lock, __flags);
}
static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
{
	return ((a->dev_addr == b->dev_addr) &&
		(a->dev == b->dev)) ? true : false;
}

static bool containing_match(struct dma_debug_entry *a,
			     struct dma_debug_entry *b)
{
	if (a->dev != b->dev)
		return false;

	if ((b->dev_addr <= a->dev_addr) &&
	    ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
		return true;

	return false;
}
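
/*
 * Example (illustrative): a sync reference covering [0x1000, 0x1100) is
 * "contained" by an entry that mapped [0x0800, 0x2000) on the same device,
 * so partial syncs of a larger mapping still find their entry.
 */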
/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
						  struct dma_debug_entry *ref,
						  match_fn match)
{
	struct dma_debug_entry *entry, *ret = NULL;
	int matches = 0, match_lvl, last_lvl = -1;

	list_for_each_entry(entry, &bucket->list, list) {
		if (!match(ref, entry))
			continue;

		/*
		 * Some drivers map the same physical address multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which returns the entry from
		 * the hash which fits best to the reference value
		 * instead of the first-fit.
		 */
		matches += 1;
		match_lvl = 0;
		entry->size         == ref->size         ? ++match_lvl : 0;
		entry->type         == ref->type         ? ++match_lvl : 0;
		entry->direction    == ref->direction    ? ++match_lvl : 0;
		entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;

		if (match_lvl == 4) {
			/* perfect-fit - return the result */
			return entry;
		} else if (match_lvl > last_lvl) {
			/*
			 * We found an entry that fits better than the
			 * previous one or it is the 1st match.
			 */
			last_lvl = match_lvl;
			ret      = entry;
		}
	}

	/*
	 * If we have multiple matches but no perfect-fit, just return
	 * NULL.
	 */
	ret = (matches == 1) ? ret : NULL;

	return ret;
}
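
/*
 * Example (illustrative): two entries share dev and dev_addr. The one whose
 * size, type, direction and sg_call_ents all equal the reference scores
 * match_lvl == 4 and is returned immediately; an entry matching only size
 * and direction scores 2 and is merely remembered as the best candidate so
 * far.
 */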
static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
						 struct dma_debug_entry *ref)
{
	return __hash_bucket_find(bucket, ref, exact_match);
}

static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
						   struct dma_debug_entry *ref,
						   unsigned long *flags)
{
	unsigned int max_range = dma_get_max_seg_size(ref->dev);
	struct dma_debug_entry *entry, index = *ref;
	unsigned int range = 0;

	while (range <= max_range) {
		entry = __hash_bucket_find(*bucket, ref, containing_match);

		if (entry)
			return entry;

		/*
		 * Nothing found, go back a hash bucket
		 */
		put_hash_bucket(*bucket, flags);
		range          += (1 << HASH_FN_SHIFT);
		index.dev_addr -= (1 << HASH_FN_SHIFT);
		*bucket = get_hash_bucket(&index, flags);
	}

	return NULL;
}
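
/*
 * Example (illustrative): with an 8 KiB hash granule (1 << HASH_FN_SHIFT)
 * and a device max_seg_size of 64 KiB, a sync that points into the middle
 * of a large mapping walks back through roughly eight neighbouring buckets
 * before giving up.
 */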
/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
			    struct dma_debug_entry *entry)
{
	list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
	list_del(&entry->list);
}
static unsigned long long phys_addr(struct dma_debug_entry *entry)
{
	if (entry->type == dma_debug_resource)
		return __pfn_to_phys(entry->pfn) + entry->offset;

	return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
}
/*
 * Dump mapping entries for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
	int idx;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);

		list_for_each_entry(entry, &bucket->list, list) {
			if (!dev || dev == entry->dev) {
				dev_info(entry->dev,
					 "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
					 type2name[entry->type], idx,
					 phys_addr(entry), entry->pfn,
					 entry->dev_addr, entry->size,
					 dir2name[entry->direction],
					 maperr2str[entry->map_err_type]);
			}
		}

		spin_unlock_irqrestore(&bucket->lock, flags);
	}
}
/*
 * For each mapping (initial cacheline in the case of
 * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
 * scatterlist, or the cacheline specified in dma_map_single) insert
 * into this tree using the cacheline as the key. At
 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
 * the entry already exists at insertion time add a tag as a reference
 * count for the overlapping mappings. For now, the overlap tracking
 * just ensures that 'unmaps' balance 'maps' before marking the
 * cacheline idle, but we should also be flagging overlaps as an API
 * violation.
 *
 * Memory usage is mostly constrained by the maximum number of available
 * dma-debug entries in that we need a free dma_debug_entry before
 * inserting into the tree. In the case of dma_map_page and
 * dma_alloc_coherent there is only one dma_debug_entry and one
 * dma_active_cacheline entry to track per event. dma_map_sg(), on the
 * other hand, consumes a single dma_debug_entry, but inserts 'nents'
 * entries into the tree.
 *
 * At any time debug_dma_assert_idle() can be called to trigger a
 * warning if any cachelines in the given page are in the active set.
 */
static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
static DEFINE_SPINLOCK(radix_lock);
#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)
static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
{
	return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
		(entry->offset >> L1_CACHE_SHIFT);
}
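
/*
 * Worked example (illustrative, assuming 4 KiB pages and 64-byte cache
 * lines, i.e. CACHELINE_PER_PAGE_SHIFT == 6): an entry with pfn 0x1234 and
 * offset 0x80 maps to cacheline number (0x1234 << 6) + (0x80 >> 6) ==
 * 0x48d02, so each page contributes up to 64 distinct cacheline keys.
 */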
static int active_cacheline_read_overlap(phys_addr_t cln)
{
	int overlap = 0, i;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
			overlap |= 1 << i;
	return overlap;
}

static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
{
	int i;

	if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
		return overlap;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (overlap & 1 << i)
			radix_tree_tag_set(&dma_active_cacheline, cln, i);
		else
			radix_tree_tag_clear(&dma_active_cacheline, cln, i);

	return overlap;
}
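
/*
 * Example (illustrative): the overlap count lives in the radix tree's
 * per-entry tag bits rather than in the entry itself. With three tag bits
 * an overlap of 5 (binary 101) sets tags 0 and 2 and clears tag 1; counts
 * above ACTIVE_CACHELINE_MAX_OVERLAP are not stored, which is what the
 * WARN_ONCE below catches.
 */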
static void active_cacheline_inc_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	overlap = active_cacheline_set_overlap(cln, ++overlap);

	/* If we overflowed the overlap counter then we're potentially
	 * leaking dma-mappings. Otherwise, if maps and unmaps are
	 * balanced then this overflow may cause false negatives in
	 * debug_dma_assert_idle() as the cacheline may be marked idle
	 * early.
	 */
	WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
		  pr_fmt("exceeded %d overlapping mappings of cacheline %pa\n"),
		  ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
}

static int active_cacheline_dec_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	return active_cacheline_set_overlap(cln, --overlap);
}
static int active_cacheline_insert(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;
	int rc;

	/* If the device is not writing memory then we don't have any
	 * concerns about the cpu consuming stale data. This mitigates
	 * legitimate usages of overlapping mappings.
	 */
	if (entry->direction == DMA_TO_DEVICE)
		return 0;

	spin_lock_irqsave(&radix_lock, flags);
	rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
	if (rc == -EEXIST)
		active_cacheline_inc_overlap(cln);
	spin_unlock_irqrestore(&radix_lock, flags);

	return rc;
}

static void active_cacheline_remove(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;

	/* ...mirror the insert case */
	if (entry->direction == DMA_TO_DEVICE)
		return;

	spin_lock_irqsave(&radix_lock, flags);
	/* since we are counting overlaps the final put of the
	 * cacheline will occur when the overlap count is 0.
	 * active_cacheline_dec_overlap() returns -1 in that case
	 */
	if (active_cacheline_dec_overlap(cln) < 0)
		radix_tree_delete(&dma_active_cacheline, cln);
	spin_unlock_irqrestore(&radix_lock, flags);
}
/**
 * debug_dma_assert_idle() - assert that a page is not undergoing dma
 * @page: page to lookup in the dma_active_cacheline tree
 *
 * Place a call to this routine in cases where the cpu touching the page
 * before the dma completes (page is dma_unmapped) will lead to data
 * corruption.
 */
void debug_dma_assert_idle(struct page *page)
{
	static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
	struct dma_debug_entry *entry = NULL;
	void **results = (void **) &ents;
	unsigned int nents, i;
	unsigned long flags;
	phys_addr_t cln;

	if (dma_debug_disabled())
		return;

	if (!page)
		return;

	cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
	spin_lock_irqsave(&radix_lock, flags);
	nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
				       CACHELINES_PER_PAGE);
	for (i = 0; i < nents; i++) {
		phys_addr_t ent_cln = to_cacheline_number(ents[i]);

		if (ent_cln == cln) {
			entry = ents[i];
			break;
		} else if (ent_cln >= cln + CACHELINES_PER_PAGE)
			break;
	}
	spin_unlock_irqrestore(&radix_lock, flags);

	if (!entry)
		return;

	cln = to_cacheline_number(entry);
	err_printk(entry->dev, entry,
		   "cpu touching an active dma mapped cacheline [cln=%pa]\n",
		   &cln);
}
/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
	struct hash_bucket *bucket;
	unsigned long flags;
	int rc;

	bucket = get_hash_bucket(entry, &flags);
	hash_bucket_add(bucket, entry);
	put_hash_bucket(bucket, &flags);

	rc = active_cacheline_insert(entry);
	if (rc == -ENOMEM) {
		pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
		global_disable = true;
	}

	/* TODO: report -EEXIST errors here as overlapping mappings are
	 * not supported by the DMA API
	 */
}
static int dma_debug_create_entries(gfp_t gfp)
{
	struct dma_debug_entry *entry;
	int i;

	entry = (void *)get_zeroed_page(gfp);
	if (!entry)
		return -ENOMEM;

	for (i = 0; i < DMA_DEBUG_DYNAMIC_ENTRIES; i++)
		list_add_tail(&entry[i].list, &free_entries);

	num_free_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
	nr_total_entries += DMA_DEBUG_DYNAMIC_ENTRIES;

	return 0;
}
static struct dma_debug_entry *__dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;

	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
	list_del(&entry->list);
	memset(entry, 0, sizeof(*entry));

	num_free_entries -= 1;
	if (num_free_entries < min_free_entries)
		min_free_entries = num_free_entries;

	return entry;
}
void __dma_entry_alloc_check_leak(void)
{
	u32 tmp = nr_total_entries % nr_prealloc_entries;

	/* Shout each time we tick over some multiple of the initial pool */
	if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) {
		pr_info("dma_debug_entry pool grown to %u (%u00%%)\n",
			nr_total_entries,
			(nr_total_entries / nr_prealloc_entries));
	}
}
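
/*
 * Example (illustrative): with the default preallocation of 65536 entries,
 * the message fires each time the pool crosses a multiple of that size, so
 * after growing to 131072 entries it reads "pool grown to 131072 (200%)".
 */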
/* struct dma_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(&free_entries_lock, flags);
	if (num_free_entries == 0) {
		if (dma_debug_create_entries(GFP_ATOMIC)) {
			global_disable = true;
			spin_unlock_irqrestore(&free_entries_lock, flags);
			pr_err("debugging out of memory - disabling\n");
			return NULL;
		}
		__dma_entry_alloc_check_leak();
	}

	entry = __dma_entry_alloc();

	spin_unlock_irqrestore(&free_entries_lock, flags);

#ifdef CONFIG_STACKTRACE
	entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
	entry->stacktrace.entries = entry->st_entries;
	entry->stacktrace.skip = 2;
	save_stack_trace(&entry->stacktrace);
#endif

	return entry;
}
static void dma_entry_free(struct dma_debug_entry *entry)
{
	unsigned long flags;

	active_cacheline_remove(entry);

	/*
	 * add to beginning of the list - this way the entries are
	 * more likely cache hot when they are reallocated.
	 */
	spin_lock_irqsave(&free_entries_lock, flags);
	list_add(&entry->list, &free_entries);
	num_free_entries += 1;
	spin_unlock_irqrestore(&free_entries_lock, flags);
}
/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */

static ssize_t filter_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN + 1];
	unsigned long flags;
	int len;

	if (!current_driver_name[0])
		return 0;

	/*
	 * We can't copy to userspace directly because current_driver_name can
	 * only be read under the driver_name_lock with irqs disabled. So
	 * create a temporary copy first.
	 */
	read_lock_irqsave(&driver_name_lock, flags);
	len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
	read_unlock_irqrestore(&driver_name_lock, flags);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t filter_write(struct file *file, const char __user *userbuf,
			    size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN];
	unsigned long flags;
	size_t len;
	int i;

	/*
	 * We can't copy from userspace directly. Access to
	 * current_driver_name is protected with a write_lock with irqs
	 * disabled. Since copy_from_user can fault and may sleep we
	 * need to copy to a temporary buffer first.
	 */
	len = min(count, (size_t)(NAME_MAX_LEN - 1));
	if (copy_from_user(buf, userbuf, len))
		return -EFAULT;

	buf[len] = 0;

	write_lock_irqsave(&driver_name_lock, flags);

	/*
	 * Now handle the string we got from userspace very carefully.
	 * The rules are:
	 *         - only use the first token we got
	 *         - token delimiter is everything looking like a space
	 *           character (' ', '\n', '\t' ...)
	 */
	if (!isalnum(buf[0])) {
		/*
		 * If the first character userspace gave us is not
		 * alphanumerical then assume the filter should be
		 * switched off.
		 */
		if (current_driver_name[0])
			pr_info("switching off dma-debug driver filter\n");
		current_driver_name[0] = 0;
		current_driver = NULL;
		goto out_unlock;
	}

	/*
	 * Now parse out the first token and use it as the name for the
	 * driver to filter for.
	 */
	for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
		current_driver_name[i] = buf[i];
		if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
			break;
	}
	current_driver_name[i] = 0;
	current_driver = NULL;

	pr_info("enable driver filter for driver [%s]\n",
		current_driver_name);

out_unlock:
	write_unlock_irqrestore(&driver_name_lock, flags);

	return count;
}
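
/*
 * Example usage (illustrative): with debugfs mounted at /sys/kernel/debug,
 *
 *	echo "e1000e" > /sys/kernel/debug/dma-api/driver_filter
 *
 * restricts error reports to that driver, and writing a non-alphanumeric
 * first character (for example a bare newline) switches the filter off
 * again.
 */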
static const struct file_operations filter_fops = {
	.read  = filter_read,
	.write = filter_write,
	.llseek = default_llseek,
};
static int dma_debug_fs_init(void)
{
	dma_debug_dent = debugfs_create_dir("dma-api", NULL);
	if (!dma_debug_dent) {
		pr_err("can not create debugfs directory\n");
		return -ENOMEM;
	}

	global_disable_dent = debugfs_create_bool("disabled", 0444,
			dma_debug_dent, &global_disable);
	if (!global_disable_dent)
		goto out_err;

	error_count_dent = debugfs_create_u32("error_count", 0444,
			dma_debug_dent, &error_count);
	if (!error_count_dent)
		goto out_err;

	show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
			dma_debug_dent, &show_all_errors);
	if (!show_all_errors_dent)
		goto out_err;

	show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
			dma_debug_dent, &show_num_errors);
	if (!show_num_errors_dent)
		goto out_err;

	num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
			dma_debug_dent, &num_free_entries);
	if (!num_free_entries_dent)
		goto out_err;

	min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
			dma_debug_dent, &min_free_entries);
	if (!min_free_entries_dent)
		goto out_err;

	nr_total_entries_dent = debugfs_create_u32("nr_total_entries", 0444,
			dma_debug_dent, &nr_total_entries);
	if (!nr_total_entries_dent)
		goto out_err;

	filter_dent = debugfs_create_file("driver_filter", 0644,
					  dma_debug_dent, NULL, &filter_fops);
	if (!filter_dent)
		goto out_err;

	return 0;

out_err:
	debugfs_remove_recursive(dma_debug_dent);

	return -ENOMEM;
}
static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
{
	struct dma_debug_entry *entry;
	unsigned long flags;
	int count = 0, i;

	for (i = 0; i < HASH_SIZE; ++i) {
		spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
		list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
			if (entry->dev == dev) {
				count += 1;
				*out_entry = entry;
			}
		}
		spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
	}

	return count;
}
static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
{
	struct device *dev = data;
	struct dma_debug_entry *uninitialized_var(entry);
	int count;

	if (dma_debug_disabled())
		return 0;

	switch (action) {
	case BUS_NOTIFY_UNBOUND_DRIVER:
		count = device_dma_allocations(dev, &entry);
		if (count == 0)
			break;
		err_printk(dev, entry, "device driver has pending "
				"DMA allocations while released from device "
				"[count=%d]\n"
				"One of leaked entries details: "
				"[device address=0x%016llx] [size=%llu bytes] "
				"[mapped with %s] [mapped as %s]\n",
			count, entry->dev_addr, entry->size,
			dir2name[entry->direction], type2name[entry->type]);
		break;
	default:
		break;
	}

	return 0;
}
void dma_debug_add_bus(struct bus_type *bus)
{
	struct notifier_block *nb;

	if (dma_debug_disabled())
		return;

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (nb == NULL) {
		pr_err("dma_debug_add_bus: out of memory\n");
		return;
	}

	nb->notifier_call = dma_debug_device_change;

	bus_register_notifier(bus, nb);
}
static int dma_debug_init(void)
{
	int i, nr_pages;

	/* Do not use dma_debug_initialized here, since we really want to be
	 * called to set dma_debug_initialized
	 */
	if (global_disable)
		return 0;

	for (i = 0; i < HASH_SIZE; ++i) {
		INIT_LIST_HEAD(&dma_entry_hash[i].list);
		spin_lock_init(&dma_entry_hash[i].lock);
	}

	if (dma_debug_fs_init() != 0) {
		pr_err("error creating debugfs entries - disabling\n");
		global_disable = true;

		return 0;
	}

	nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES);
	for (i = 0; i < nr_pages; ++i)
		dma_debug_create_entries(GFP_KERNEL);
	if (num_free_entries >= nr_prealloc_entries) {
		pr_info("preallocated %d debug entries\n", nr_total_entries);
	} else if (num_free_entries > 0) {
		pr_warn("%d debug entries requested but only %d allocated\n",
			nr_prealloc_entries, nr_total_entries);
	} else {
		pr_err("debugging out of memory error - disabled\n");
		global_disable = true;

		return 0;
	}
	min_free_entries = num_free_entries;

	dma_debug_initialized = true;

	pr_info("debugging enabled by kernel config\n");
	return 0;
}
core_initcall(dma_debug_init);
static __init int dma_debug_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (strncmp(str, "off", 3) == 0) {
		pr_info("debugging disabled on kernel command line\n");
		global_disable = true;
	}

	return 0;
}

static __init int dma_debug_entries_cmdline(char *str)
{
	if (!str)
		return -EINVAL;
	if (!get_option(&str, &nr_prealloc_entries))
		nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
	return 0;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);
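
/*
 * Example (illustrative): booting with
 *
 *	dma_debug=off
 * or
 *	dma_debug_entries=131072
 *
 * disables the checks entirely or, respectively, raises the preallocated
 * entry pool above the PREALLOC_DMA_DEBUG_ENTRIES default of 65536.
 */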
static void check_unmap(struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(ref, &flags);
	entry = bucket_find_exact(bucket, ref);

	if (!entry) {
		/* must drop lock before calling dma_mapping_error */
		put_hash_bucket(bucket, &flags);

		if (dma_mapping_error(ref->dev, ref->dev_addr)) {
			err_printk(ref->dev, NULL,
				   "device driver tries to free an "
				   "invalid DMA memory address\n");
		} else {
			err_printk(ref->dev, NULL,
				   "device driver tries to free DMA "
				   "memory it has not allocated [device "
				   "address=0x%016llx] [size=%llu bytes]\n",
				   ref->dev_addr, ref->size);
		}
		return;
	}

	if (ref->size != entry->size) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with different size "
			   "[device address=0x%016llx] [map size=%llu bytes] "
			   "[unmap size=%llu bytes]\n",
			   ref->dev_addr, entry->size, ref->size);
	}

	if (ref->type != entry->type) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with wrong function "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s] [unmapped as %s]\n",
			   ref->dev_addr, ref->size,
			   type2name[entry->type], type2name[ref->type]);
	} else if ((entry->type == dma_debug_coherent) &&
		   (phys_addr(ref) != phys_addr(entry))) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with different CPU address "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[cpu alloc address=0x%016llx] "
			   "[cpu free address=0x%016llx]",
			   ref->dev_addr, ref->size,
			   phys_addr(entry),
			   phys_addr(ref));
	}

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA sg list with different entry count "
			   "[map count=%d] [unmap count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

	/*
	 * This may not be a bug in reality - but most implementations of the
	 * DMA API don't handle this properly, so check for it here
	 */
	if (ref->direction != entry->direction) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [unmapped with %s]\n",
			   ref->dev_addr, ref->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	/*
	 * Drivers should use dma_mapping_error() to check the returned
	 * addresses of dma_map_single() and dma_map_page().
	 * If not, print this warning message. See Documentation/DMA-API.txt.
	 */
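	/*
	 * The expected driver pattern looks roughly like this (illustrative):
	 *
	 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	 *	if (dma_mapping_error(dev, dma))
	 *		return -ENOMEM;
	 *
	 * The dma_mapping_error() call is what flips map_err_type to
	 * MAP_ERR_CHECKED in debug_dma_mapping_error() later in this file.
	 */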
	if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
		err_printk(ref->dev, entry,
			   "device driver failed to check map error "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s]",
			   ref->dev_addr, ref->size,
			   type2name[entry->type]);
	}

	hash_bucket_del(entry);
	dma_entry_free(entry);

	put_hash_bucket(bucket, &flags);
}
static void check_for_stack(struct device *dev,
			    struct page *page, size_t offset)
{
	void *addr;
	struct vm_struct *stack_vm_area = task_stack_vm_area(current);

	if (!stack_vm_area) {
		/* Stack is direct-mapped. */
		if (PageHighMem(page))
			return;
		addr = page_address(page) + offset;
		if (object_is_on_stack(addr))
			err_printk(dev, NULL, "device driver maps memory from stack [addr=%p]\n", addr);
	} else {
		/* Stack is vmalloced. */
		int i;

		for (i = 0; i < stack_vm_area->nr_pages; i++) {
			if (page != stack_vm_area->pages[i])
				continue;

			addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
			err_printk(dev, NULL, "device driver maps memory from stack [probable addr=%p]\n", addr);
			break;
		}
	}
}
static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
{
	unsigned long a1 = (unsigned long)addr;
	unsigned long b1 = a1 + len;
	unsigned long a2 = (unsigned long)start;
	unsigned long b2 = (unsigned long)end;

	return !(b1 <= a2 || a1 >= b2);
}
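
/*
 * Example (illustrative): a 16-byte buffer at 0x1000 overlaps the range
 * [0x100c, 0x2000) because b1 (0x1010) > a2 (0x100c) and a1 (0x1000) <
 * b2 (0x2000); both ranges are treated as half-open intervals.
 */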
static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
{
	if (overlap(addr, len, _stext, _etext) ||
	    overlap(addr, len, __start_rodata, __end_rodata))
		err_printk(dev, NULL, "device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
}
static void check_sync(struct device *dev,
		       struct dma_debug_entry *ref,
		       bool to_cpu)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(ref, &flags);

	entry = bucket_find_contain(&bucket, ref, &flags);

	if (!entry) {
		err_printk(dev, NULL, "device driver tries "
			   "to sync DMA memory it has not allocated "
			   "[device address=0x%016llx] [size=%llu bytes]\n",
			   (unsigned long long)ref->dev_addr, ref->size);
		goto out;
	}

	if (ref->size > entry->size) {
		err_printk(dev, entry, "device driver syncs"
			   " DMA memory outside allocated range "
			   "[device address=0x%016llx] "
			   "[allocation size=%llu bytes] "
			   "[sync offset+size=%llu]\n",
			   entry->dev_addr, entry->size,
			   ref->size);
	}

	if (entry->direction == DMA_BIDIRECTIONAL)
		goto out;

	if (ref->direction != entry->direction) {
		err_printk(dev, entry, "device driver syncs "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
		      !(ref->direction == DMA_TO_DEVICE))
		err_printk(dev, entry, "device driver syncs "
			   "device read-only DMA memory for cpu "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);

	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
		       !(ref->direction == DMA_FROM_DEVICE))
		err_printk(dev, entry, "device driver syncs "
			   "device write-only DMA memory to device "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "device driver syncs "
			   "DMA sg list with different entry count "
			   "[map count=%d] [sync count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

out:
	put_hash_bucket(bucket, &flags);
}
static void check_sg_segment(struct device *dev, struct scatterlist *sg)
{
#ifdef CONFIG_DMA_API_DEBUG_SG
	unsigned int max_seg = dma_get_max_seg_size(dev);
	u64 start, end, boundary = dma_get_seg_boundary(dev);

	/*
	 * Either the driver forgot to set dma_parms appropriately, or
	 * whoever generated the list forgot to check them.
	 */
	if (sg->length > max_seg)
		err_printk(dev, NULL, "mapping sg segment longer than device claims to support [len=%u] [max=%u]\n",
			   sg->length, max_seg);
	/*
	 * In some cases this could potentially be the DMA API
	 * implementation's fault, but it would usually imply that
	 * the scatterlist was built inappropriately to begin with.
	 */
	start = sg_dma_address(sg);
	end = start + sg_dma_len(sg) - 1;
	if ((start ^ end) & ~boundary)
		err_printk(dev, NULL, "mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
			   start, end, boundary);
#endif
}
void debug_dma_map_single(struct device *dev, const void *addr,
			  unsigned long len)
{
	if (unlikely(dma_debug_disabled()))
		return;

	if (!virt_addr_valid(addr))
		err_printk(dev, NULL, "device driver maps memory from invalid area [addr=%p] [len=%lu]\n",
			   addr, len);

	if (is_vmalloc_addr(addr))
		err_printk(dev, NULL, "device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n",
			   addr, len);
}
EXPORT_SYMBOL(debug_dma_map_single);
void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
			size_t size, int direction, dma_addr_t dma_addr,
			bool map_single)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	if (dma_mapping_error(dev, dma_addr))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->dev       = dev;
	entry->type      = dma_debug_page;
	entry->pfn       = page_to_pfn(page);
	entry->offset    = offset;
	entry->dev_addr  = dma_addr;
	entry->size      = size;
	entry->direction = direction;
	entry->map_err_type = MAP_ERR_NOT_CHECKED;

	if (map_single)
		entry->type = dma_debug_single;

	check_for_stack(dev, page, offset);

	if (!PageHighMem(page)) {
		void *addr = page_address(page) + offset;

		check_for_illegal_area(dev, addr, size);
	}

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_page);
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_debug_entry ref;
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.dev = dev;
	ref.dev_addr = dma_addr;
	bucket = get_hash_bucket(&ref, &flags);

	list_for_each_entry(entry, &bucket->list, list) {
		if (!exact_match(&ref, entry))
			continue;

		/*
		 * The same physical address can be mapped multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which updates the first entry
		 * from the hash which fits the reference value and is
		 * not currently listed as being checked.
		 */
		if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
			entry->map_err_type = MAP_ERR_CHECKED;
			break;
		}
	}

	put_hash_bucket(bucket, &flags);
}
EXPORT_SYMBOL(debug_dma_mapping_error);
void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
			  size_t size, int direction, bool map_single)
{
	struct dma_debug_entry ref = {
		.type           = dma_debug_page,
		.dev            = dev,
		.dev_addr       = addr,
		.size           = size,
		.direction      = direction,
	};

	if (unlikely(dma_debug_disabled()))
		return;

	if (map_single)
		ref.type = dma_debug_single;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_page);
void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
		      int nents, int mapped_ents, int direction)
{
	struct dma_debug_entry *entry;
	struct scatterlist *s;
	int i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, mapped_ents, i) {
		entry = dma_entry_alloc();
		if (!entry)
			return;

		entry->type           = dma_debug_sg;
		entry->dev            = dev;
		entry->pfn            = page_to_pfn(sg_page(s));
		entry->offset         = s->offset;
		entry->size           = sg_dma_len(s);
		entry->dev_addr       = sg_dma_address(s);
		entry->direction      = direction;
		entry->sg_call_ents   = nents;
		entry->sg_mapped_ents = mapped_ents;

		check_for_stack(dev, sg_page(s), s->offset);

		if (!PageHighMem(sg_page(s))) {
			check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
		}

		check_sg_segment(dev, s);

		add_dma_entry(entry);
	}
}
EXPORT_SYMBOL(debug_dma_map_sg);
static int get_nr_mapped_entries(struct device *dev,
				 struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;
	int mapped_ents;

	bucket      = get_hash_bucket(ref, &flags);
	entry       = bucket_find_exact(bucket, ref);
	mapped_ents = 0;

	if (entry)
		mapped_ents = entry->sg_mapped_ents;
	put_hash_bucket(bucket, &flags);

	return mapped_ents;
}

void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
			int nelems, int dir)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sglist, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type           = dma_debug_sg,
			.dev            = dev,
			.pfn            = page_to_pfn(sg_page(s)),
			.offset         = s->offset,
			.dev_addr       = sg_dma_address(s),
			.size           = sg_dma_len(s),
			.direction      = dir,
			.sg_call_ents   = nelems,
		};

		if (mapped_ents && i >= mapped_ents)
			break;

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		check_unmap(&ref);
	}
}
EXPORT_SYMBOL(debug_dma_unmap_sg);
void debug_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t dma_addr, void *virt)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	if (unlikely(virt == NULL))
		return;

	/* handle vmalloc and linear addresses */
	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type      = dma_debug_coherent;
	entry->dev       = dev;
	entry->offset    = offset_in_page(virt);
	entry->size      = size;
	entry->dev_addr  = dma_addr;
	entry->direction = DMA_BIDIRECTIONAL;

	if (is_vmalloc_addr(virt))
		entry->pfn = vmalloc_to_pfn(virt);
	else
		entry->pfn = page_to_pfn(virt_to_page(virt));

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_alloc_coherent);
void debug_dma_free_coherent(struct device *dev, size_t size,
			     void *virt, dma_addr_t addr)
{
	struct dma_debug_entry ref = {
		.type           = dma_debug_coherent,
		.dev            = dev,
		.offset         = offset_in_page(virt),
		.dev_addr       = addr,
		.size           = size,
		.direction      = DMA_BIDIRECTIONAL,
	};

	/* handle vmalloc and linear addresses */
	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
		return;

	if (is_vmalloc_addr(virt))
		ref.pfn = vmalloc_to_pfn(virt);
	else
		ref.pfn = page_to_pfn(virt_to_page(virt));

	if (unlikely(dma_debug_disabled()))
		return;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_free_coherent);
void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
			    int direction, dma_addr_t dma_addr)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type         = dma_debug_resource;
	entry->dev          = dev;
	entry->pfn          = PHYS_PFN(addr);
	entry->offset       = offset_in_page(addr);
	entry->size         = size;
	entry->dev_addr     = dma_addr;
	entry->direction    = direction;
	entry->map_err_type = MAP_ERR_NOT_CHECKED;

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_resource);

void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	struct dma_debug_entry ref = {
		.type           = dma_debug_resource,
		.dev            = dev,
		.dev_addr       = dma_addr,
		.size           = size,
		.direction      = direction,
	};

	if (unlikely(dma_debug_disabled()))
		return;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_resource);
void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   size_t size, int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type         = dma_debug_single;
	ref.dev          = dev;
	ref.dev_addr     = dma_handle;
	ref.size         = size;
	ref.direction    = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);

void debug_dma_sync_single_for_device(struct device *dev,
				      dma_addr_t dma_handle, size_t size,
				      int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type         = dma_debug_single;
	ref.dev          = dev;
	ref.dev_addr     = dma_handle;
	ref.size         = size;
	ref.direction    = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);
void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			       int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type           = dma_debug_sg,
			.dev            = dev,
			.pfn            = page_to_pfn(sg_page(s)),
			.offset         = s->offset,
			.dev_addr       = sg_dma_address(s),
			.size           = sg_dma_len(s),
			.direction      = direction,
			.sg_call_ents   = nelems,
		};

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, true);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);

void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				  int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type           = dma_debug_sg,
			.dev            = dev,
			.pfn            = page_to_pfn(sg_page(s)),
			.offset         = s->offset,
			.dev_addr       = sg_dma_address(s),
			.size           = sg_dma_len(s),
			.direction      = direction,
			.sg_call_ents   = nelems,
		};

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, false);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
static int __init dma_debug_driver_setup(char *str)
{
	int i;

	for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
		current_driver_name[i] = *str;
		if (*str == 0)
			break;
	}

	if (current_driver_name[0])
		pr_info("enable driver filter for driver [%s]\n",
			current_driver_name);

	return 1;
}
__setup("dma_debug_driver=", dma_debug_driver_setup);