// SPDX-License-Identifier: GPL-2.0-only
/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 * 05/09/10 linville	Add support for syncing ranges, support syncing for
 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 * 08/12/11 beckyb	Add highmem support
 */

#define pr_fmt(fmt) "software IO TLB: " fmt

#include <linux/cache.h>
#include <linux/cc_platform.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/iommu-helper.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/scatterlist.h>
#include <linux/set_memory.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/types.h>
#ifdef CONFIG_DMA_RESTRICTED_POOL
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
#include <linux/slab.h>
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/swiotlb.h>

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

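/*
 * For example, with 4 KiB pages (PAGE_SHIFT == 12) and the usual 2 KiB
 * IO TLB slot size (IO_TLB_SHIFT == 11), SLABS_PER_PAGE == 1 << 1 == 2.
 */
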
/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)

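/*
 * With 2 KiB slots this floor works out to (1 << 20) >> 11 == 512 slabs,
 * i.e. the contiguous 1MB mentioned above.
 */
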
#define INVALID_PHYS_ADDR (~(phys_addr_t)0)

struct io_tlb_slot {
        phys_addr_t orig_addr;
        size_t alloc_size;
        unsigned int list;
};

static bool swiotlb_force_bounce;
static bool swiotlb_force_disable;

struct io_tlb_mem io_tlb_default_mem;

static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;
static unsigned long default_nareas;

/**
 * struct io_tlb_area - IO TLB memory area descriptor
 *
 * This is a single area with a single lock.
 *
 * @used:	The number of used IO TLB blocks.
 * @index:	The slot index to start searching in this area for next round.
 * @lock:	The lock to protect the above data structures in the map and
 *		unmap calls.
 */
struct io_tlb_area {
        unsigned long used;
        unsigned int index;
        spinlock_t lock;
};

/*
 * Round up the number of slabs to the next power of 2. The last area is
 * going to be smaller than the rest if default_nslabs is not a power of two.
 * The number of slots in an area should be a multiple of IO_TLB_SEGSIZE,
 * otherwise a segment may span two or more areas. That conflicts with free
 * contiguous slots tracking: free slots are treated as contiguous no matter
 * whether they cross an area boundary.
 *
 * Return true if default_nslabs is rounded up.
 */
static bool round_up_default_nslabs(void)
{
        if (!default_nareas)
                return false;

        if (default_nslabs < IO_TLB_SEGSIZE * default_nareas)
                default_nslabs = IO_TLB_SEGSIZE * default_nareas;
        else if (is_power_of_2(default_nslabs))
                return false;
        default_nslabs = roundup_pow_of_two(default_nslabs);
        return true;
}

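/*
 * Worked example (IO_TLB_SEGSIZE == 128): with default_nareas == 8 and
 * default_nslabs == 3072, the value is at least 128 * 8 == 1024 but not a
 * power of two, so it is rounded up to 4096 and true is returned. Each
 * area then holds 4096 / 8 == 512 slots, a multiple of IO_TLB_SEGSIZE as
 * required.
 */
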
static void swiotlb_adjust_nareas(unsigned int nareas)
{
        /* use a single area when none is specified */
        if (!nareas)
                nareas = 1;
        else if (!is_power_of_2(nareas))
                nareas = roundup_pow_of_two(nareas);

        default_nareas = nareas;

        pr_info("area num %d.\n", nareas);
        if (round_up_default_nslabs())
                pr_info("SWIOTLB bounce buffer size rounded up to %luMB",
                        (default_nslabs << IO_TLB_SHIFT) >> 20);
}

static int __init
setup_io_tlb_npages(char *str)
{
        if (isdigit(*str)) {
                /* avoid tail segment of size < IO_TLB_SEGSIZE */
                default_nslabs =
                        ALIGN(simple_strtoul(str, &str, 0), IO_TLB_SEGSIZE);
        }
        if (*str == ',')
                ++str;
        if (isdigit(*str))
                swiotlb_adjust_nareas(simple_strtoul(str, &str, 0));
        if (*str == ',')
                ++str;
        if (!strcmp(str, "force"))
                swiotlb_force_bounce = true;
        else if (!strcmp(str, "noforce"))
                swiotlb_force_disable = true;

        return 0;
}
early_param("swiotlb", setup_io_tlb_npages);

unsigned long swiotlb_size_or_default(void)
{
        return default_nslabs << IO_TLB_SHIFT;
}

void __init swiotlb_adjust_size(unsigned long size)
{
        /*
         * If the swiotlb parameter has not been specified, give a chance to
         * architectures such as those supporting memory encryption to
         * adjust/expand SWIOTLB size for their use.
         */
        if (default_nslabs != IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT)
                return;

        size = ALIGN(size, IO_TLB_SIZE);
        default_nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
        if (round_up_default_nslabs())
                size = default_nslabs << IO_TLB_SHIFT;
        pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20);
}

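/*
 * A typical caller is early architecture setup code; e.g. a
 * memory-encryption init path might request a larger bounce buffer with
 * something like the sketch below (the size is illustrative only, not a
 * call site in this file):
 *
 *	swiotlb_adjust_size(6UL << 20);
 *
 * The request is aligned to IO_TLB_SIZE and IO_TLB_SEGSIZE granularity
 * before it replaces default_nslabs.
 */
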
void swiotlb_print_info(void)
{
        struct io_tlb_mem *mem = &io_tlb_default_mem;

        if (!mem->nslabs) {
                pr_warn("No low mem\n");
                return;
        }

        pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end,
                (mem->nslabs << IO_TLB_SHIFT) >> 20);
}

static inline unsigned long io_tlb_offset(unsigned long val)
{
        return val & (IO_TLB_SEGSIZE - 1);
}

static inline unsigned long nr_slots(u64 val)
{
        return DIV_ROUND_UP(val, IO_TLB_SIZE);
}

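/*
 * Example: with IO_TLB_SIZE == 2048, nr_slots(4097) == 3; and with
 * IO_TLB_SEGSIZE == 128, io_tlb_offset(129) == 1, i.e. slot 129 is the
 * second slot of its segment.
 */
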
/*
 * Early SWIOTLB allocation may be too early to allow an architecture to
 * perform the desired operations.  This function allows the architecture to
 * call SWIOTLB when the operations are possible.  It needs to be called
 * before the SWIOTLB memory is used.
 */
void __init swiotlb_update_mem_attributes(void)
{
        struct io_tlb_mem *mem = &io_tlb_default_mem;
        unsigned long bytes;

        if (!mem->nslabs || mem->late_alloc)
                return;
        bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT);
        set_memory_decrypted((unsigned long)mem->vaddr, bytes >> PAGE_SHIFT);
}

static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
                unsigned long nslabs, unsigned int flags,
                bool late_alloc, unsigned int nareas)
{
        void *vaddr = phys_to_virt(start);
        unsigned long bytes = nslabs << IO_TLB_SHIFT, i;

        mem->nslabs = nslabs;
        mem->start = start;
        mem->end = mem->start + bytes;
        mem->late_alloc = late_alloc;
        mem->nareas = nareas;
        mem->area_nslabs = nslabs / mem->nareas;

        mem->force_bounce = swiotlb_force_bounce || (flags & SWIOTLB_FORCE);

        for (i = 0; i < mem->nareas; i++) {
                spin_lock_init(&mem->areas[i].lock);
                mem->areas[i].index = 0;
                mem->areas[i].used = 0;
        }

        for (i = 0; i < mem->nslabs; i++) {
                mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
                mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
                mem->slots[i].alloc_size = 0;
        }

        memset(vaddr, 0, bytes);
        mem->vaddr = vaddr;
}

static void __init *swiotlb_memblock_alloc(unsigned long nslabs,
                unsigned int flags,
                int (*remap)(void *tlb, unsigned long nslabs))
{
        size_t bytes = PAGE_ALIGN(nslabs << IO_TLB_SHIFT);
        void *tlb;

        /*
         * By default allocate the bounce buffer memory from low memory, but
         * allow picking a location anywhere for hypervisors with guest
         * memory encryption.
         */
        if (flags & SWIOTLB_ANY)
                tlb = memblock_alloc(bytes, PAGE_SIZE);
        else
                tlb = memblock_alloc_low(bytes, PAGE_SIZE);

        if (!tlb) {
                pr_warn("%s: Failed to allocate %zu bytes tlb structure\n",
                        __func__, bytes);
                return NULL;
        }

        if (remap && remap(tlb, nslabs) < 0) {
                memblock_free(tlb, PAGE_ALIGN(bytes));
                pr_warn("%s: Failed to remap %zu bytes\n", __func__, bytes);
                return NULL;
        }

        return tlb;
}

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
                int (*remap)(void *tlb, unsigned long nslabs))
{
        struct io_tlb_mem *mem = &io_tlb_default_mem;
        unsigned long nslabs;
        size_t alloc_size;
        void *tlb;

        if (!addressing_limit && !swiotlb_force_bounce)
                return;
        if (swiotlb_force_disable)
                return;

        /*
         * default_nslabs may change when the area number is adjusted,
         * so allocate the bounce buffer only after adjusting it.
         */
        if (!default_nareas)
                swiotlb_adjust_nareas(num_possible_cpus());

        nslabs = default_nslabs;
        while ((tlb = swiotlb_memblock_alloc(nslabs, flags, remap)) == NULL) {
                if (nslabs <= IO_TLB_MIN_SLABS)
                        return;
                nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
        }

        if (default_nslabs != nslabs) {
                pr_info("SWIOTLB bounce buffer size adjusted %lu -> %lu slabs",
                        default_nslabs, nslabs);
                default_nslabs = nslabs;
        }

        alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
        mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
        if (!mem->slots) {
                pr_warn("%s: Failed to allocate %zu bytes align=0x%lx\n",
                        __func__, alloc_size, PAGE_SIZE);
                return;
        }

        mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area),
                default_nareas), SMP_CACHE_BYTES);
        if (!mem->areas) {
                pr_warn("%s: Failed to allocate mem->areas.\n", __func__);
                return;
        }

        swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, flags, false,
                                default_nareas);

        if (flags & SWIOTLB_VERBOSE)
                swiotlb_print_info();
}

void __init swiotlb_init(bool addressing_limit, unsigned int flags)
{
        swiotlb_init_remap(addressing_limit, flags, NULL);
}

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int swiotlb_init_late(size_t size, gfp_t gfp_mask,
                int (*remap)(void *tlb, unsigned long nslabs))
{
        struct io_tlb_mem *mem = &io_tlb_default_mem;
        unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
        unsigned char *vstart = NULL;
        unsigned int order, area_order;
        bool retried = false;
        int rc = 0;

        if (swiotlb_force_disable)
                return 0;

retry:
        order = get_order(nslabs << IO_TLB_SHIFT);
        nslabs = SLABS_PER_PAGE << order;

        while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
                vstart = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
                                                  order);
                if (vstart)
                        break;
                order--;
                nslabs = SLABS_PER_PAGE << order;
                retried = true;
        }

        if (!vstart)
                return -ENOMEM;

        if (remap)
                rc = remap(vstart, nslabs);
        if (rc) {
                free_pages((unsigned long)vstart, order);

                nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
                if (nslabs < IO_TLB_MIN_SLABS)
                        return rc;
                retried = true;
                goto retry;
        }

        if (retried) {
                pr_warn("only able to allocate %ld MB\n",
                        (PAGE_SIZE << order) >> 20);
        }

        if (!default_nareas)
                swiotlb_adjust_nareas(num_possible_cpus());

        area_order = get_order(array_size(sizeof(*mem->areas),
                default_nareas));
        mem->areas = (struct io_tlb_area *)
                __get_free_pages(GFP_KERNEL | __GFP_ZERO, area_order);
        if (!mem->areas)
                goto error_area;

        mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                get_order(array_size(sizeof(*mem->slots), nslabs)));
        if (!mem->slots)
                goto error_slots;

        set_memory_decrypted((unsigned long)vstart,
                             (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
        swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, 0, true,
                                default_nareas);

        swiotlb_print_info();
        return 0;

error_slots:
        free_pages((unsigned long)mem->areas, area_order);
error_area:
        free_pages((unsigned long)vstart, order);
        return -ENOMEM;
}

void __init swiotlb_exit(void)
{
        struct io_tlb_mem *mem = &io_tlb_default_mem;
        unsigned long tbl_vaddr;
        size_t tbl_size, slots_size;
        unsigned int area_order;

        if (swiotlb_force_bounce)
                return;

        if (!mem->nslabs)
                return;

        pr_info("tearing down default memory pool\n");
        tbl_vaddr = (unsigned long)phys_to_virt(mem->start);
        tbl_size = PAGE_ALIGN(mem->end - mem->start);
        slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs));

        set_memory_encrypted(tbl_vaddr, tbl_size >> PAGE_SHIFT);
        if (mem->late_alloc) {
                area_order = get_order(array_size(sizeof(*mem->areas),
                        mem->nareas));
                free_pages((unsigned long)mem->areas, area_order);
                free_pages(tbl_vaddr, get_order(tbl_size));
                free_pages((unsigned long)mem->slots, get_order(slots_size));
        } else {
                memblock_free_late(__pa(mem->areas),
                        array_size(sizeof(*mem->areas), mem->nareas));
                memblock_free_late(mem->start, tbl_size);
                memblock_free_late(__pa(mem->slots), slots_size);
        }

        memset(mem, 0, sizeof(*mem));
}

/*
 * Return the offset into an iotlb slot required to keep the device happy.
 */
static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
{
        return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
}

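/*
 * For example, a device with a 4 KiB min_align_mask (0xfff) mapping
 * orig_addr 0x12345 yields 0x345 & (IO_TLB_SIZE - 1) == 0x345, so the
 * bounce buffer replicates the low address bits the hardware cares about.
 */
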
/*
 * Bounce: copy the swiotlb buffer from or back to the original dma location
 */
static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size,
                           enum dma_data_direction dir)
{
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
        int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT;
        phys_addr_t orig_addr = mem->slots[index].orig_addr;
        size_t alloc_size = mem->slots[index].alloc_size;
        unsigned long pfn = PFN_DOWN(orig_addr);
        unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start;
        unsigned int tlb_offset, orig_addr_offset;

        if (orig_addr == INVALID_PHYS_ADDR)
                return;

        tlb_offset = tlb_addr & (IO_TLB_SIZE - 1);
        orig_addr_offset = swiotlb_align_offset(dev, orig_addr);
        if (tlb_offset < orig_addr_offset) {
                dev_WARN_ONCE(dev, 1,
                        "Access before mapping start detected. orig offset %u, requested offset %u.\n",
                        orig_addr_offset, tlb_offset);
                return;
        }

        tlb_offset -= orig_addr_offset;
        if (tlb_offset > alloc_size) {
                dev_WARN_ONCE(dev, 1,
                        "Buffer overflow detected. Allocation size: %zu. Mapping size: %zu+%u.\n",
                        alloc_size, size, tlb_offset);
                return;
        }

        orig_addr += tlb_offset;
        alloc_size -= tlb_offset;

        if (size > alloc_size) {
                dev_WARN_ONCE(dev, 1,
                        "Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n",
                        alloc_size, size);
                size = alloc_size;
        }

        if (PageHighMem(pfn_to_page(pfn))) {
                unsigned int offset = orig_addr & ~PAGE_MASK;
                struct page *page;
                unsigned int sz = 0;
                unsigned long flags;

                while (size) {
                        sz = min_t(size_t, PAGE_SIZE - offset, size);

                        local_irq_save(flags);
                        page = pfn_to_page(pfn);
                        if (dir == DMA_TO_DEVICE)
                                memcpy_from_page(vaddr, page, offset, sz);
                        else
                                memcpy_to_page(page, offset, vaddr, sz);
                        local_irq_restore(flags);

                        size -= sz;
                        pfn++;
                        vaddr += sz;
                        offset = 0;
                }
        } else if (dir == DMA_TO_DEVICE) {
                memcpy(vaddr, phys_to_virt(orig_addr), size);
        } else {
                memcpy(phys_to_virt(orig_addr), vaddr, size);
        }
}

static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx)
{
        return start + (idx << IO_TLB_SHIFT);
}

/*
 * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
 */
static inline unsigned long get_max_slots(unsigned long boundary_mask)
{
        if (boundary_mask == ~0UL)
                return 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
        return nr_slots(boundary_mask + 1);
}

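/*
 * E.g. a 64 KiB segment boundary (boundary_mask == 0xffff) allows
 * nr_slots(0x10000) == 32 slots per boundary window. boundary_mask == ~0UL
 * must be special-cased because boundary_mask + 1 would overflow to 0.
 */
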
static unsigned int wrap_area_index(struct io_tlb_mem *mem, unsigned int index)
{
        if (index >= mem->area_nslabs)
                return 0;
        return index;
}

/*
 * Track the total used slots with a global atomic value in order to have
 * correct information to determine the high water mark. The mem_used()
 * function gives imprecise results because there's no locking across
 * multiple areas.
 */
#ifdef CONFIG_DEBUG_FS
static void inc_used_and_hiwater(struct io_tlb_mem *mem, unsigned int nslots)
{
        unsigned long old_hiwater, new_used;

        new_used = atomic_long_add_return(nslots, &mem->total_used);
        old_hiwater = atomic_long_read(&mem->used_hiwater);
        do {
                if (new_used <= old_hiwater)
                        break;
        } while (!atomic_long_try_cmpxchg(&mem->used_hiwater,
                                          &old_hiwater, new_used));
}

static void dec_used(struct io_tlb_mem *mem, unsigned int nslots)
{
        atomic_long_sub(nslots, &mem->total_used);
}

#else /* !CONFIG_DEBUG_FS */
static void inc_used_and_hiwater(struct io_tlb_mem *mem, unsigned int nslots)
{
}
static void dec_used(struct io_tlb_mem *mem, unsigned int nslots)
{
}
#endif /* CONFIG_DEBUG_FS */

/*
 * Find a suitable number of contiguous IO TLB entries that will fit this
 * request and allocate a buffer from that IO TLB pool.
 */
static int swiotlb_do_find_slots(struct device *dev, int area_index,
                phys_addr_t orig_addr, size_t alloc_size,
                unsigned int alloc_align_mask)
{
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
        struct io_tlb_area *area = mem->areas + area_index;
        unsigned long boundary_mask = dma_get_seg_boundary(dev);
        dma_addr_t tbl_dma_addr =
                phys_to_dma_unencrypted(dev, mem->start) & boundary_mask;
        unsigned long max_slots = get_max_slots(boundary_mask);
        unsigned int iotlb_align_mask =
                dma_get_min_align_mask(dev) | alloc_align_mask;
        unsigned int nslots = nr_slots(alloc_size), stride;
        unsigned int offset = swiotlb_align_offset(dev, orig_addr);
        unsigned int index, slots_checked, count = 0, i;
        unsigned long flags;
        unsigned int slot_base;
        unsigned int slot_index;

        BUG_ON(!nslots);
        BUG_ON(area_index >= mem->nareas);

        /*
         * For allocations of PAGE_SIZE or larger only look for page aligned
         * allocations.
         */
        if (alloc_size >= PAGE_SIZE)
                iotlb_align_mask |= ~PAGE_MASK;
        iotlb_align_mask &= ~(IO_TLB_SIZE - 1);

        /*
         * For mappings with an alignment requirement don't bother looping to
         * unaligned slots once we found an aligned one.
         */
        stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;

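        /*
         * E.g. a 4 KiB alignment requirement leaves iotlb_align_mask ==
         * 0x800 after the masking above, so the stride is
         * (0x800 >> 11) + 1 == 2 and only every other slot is probed.
         */
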
        spin_lock_irqsave(&area->lock, flags);
        if (unlikely(nslots > mem->area_nslabs - area->used))
                goto not_found;

        slot_base = area_index * mem->area_nslabs;
        index = area->index;

        for (slots_checked = 0; slots_checked < mem->area_nslabs; ) {
                slot_index = slot_base + index;

                if (orig_addr &&
                    (slot_addr(tbl_dma_addr, slot_index) &
                     iotlb_align_mask) != (orig_addr & iotlb_align_mask)) {
                        index = wrap_area_index(mem, index + 1);
                        slots_checked++;
                        continue;
                }

                /*
                 * If we find a slot that indicates we have 'nslots' number of
                 * contiguous buffers, we allocate the buffers from that slot
                 * and mark the entries as '0' indicating unavailable.
                 */
                if (!iommu_is_span_boundary(slot_index, nslots,
                                            nr_slots(tbl_dma_addr),
                                            max_slots)) {
                        if (mem->slots[slot_index].list >= nslots)
                                goto found;
                }
                index = wrap_area_index(mem, index + stride);
                slots_checked += stride;
        }

not_found:
        spin_unlock_irqrestore(&area->lock, flags);
        return -1;

found:
        for (i = slot_index; i < slot_index + nslots; i++) {
                mem->slots[i].list = 0;
                mem->slots[i].alloc_size = alloc_size - (offset +
                                ((i - slot_index) << IO_TLB_SHIFT));
        }
        for (i = slot_index - 1;
             io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 &&
             mem->slots[i].list; i--)
                mem->slots[i].list = ++count;

        /*
         * Update the indices to avoid searching in the next round.
         */
        area->index = wrap_area_index(mem, index + nslots);
        area->used += nslots;
        spin_unlock_irqrestore(&area->lock, flags);

        inc_used_and_hiwater(mem, nslots);
        return slot_index;
}

static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
                size_t alloc_size, unsigned int alloc_align_mask)
{
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
        int start = raw_smp_processor_id() & (mem->nareas - 1);
        int i = start, index;

        do {
                index = swiotlb_do_find_slots(dev, i, orig_addr, alloc_size,
                                              alloc_align_mask);
                if (index >= 0)
                        return index;
                if (++i >= mem->nareas)
                        i = 0;
        } while (i != start);

        return -1;
}

static unsigned long mem_used(struct io_tlb_mem *mem)
{
        int i;
        unsigned long used = 0;

        for (i = 0; i < mem->nareas; i++)
                used += mem->areas[i].used;
        return used;
}

phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
                size_t mapping_size, size_t alloc_size,
                unsigned int alloc_align_mask, enum dma_data_direction dir,
                unsigned long attrs)
{
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
        unsigned int offset = swiotlb_align_offset(dev, orig_addr);
        unsigned int i;
        int index;
        phys_addr_t tlb_addr;

        if (!mem || !mem->nslabs) {
                dev_warn_ratelimited(dev,
                        "Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
                return (phys_addr_t)DMA_MAPPING_ERROR;
        }

        if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
                pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");

        if (mapping_size > alloc_size) {
                dev_warn_once(dev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
                              mapping_size, alloc_size);
                return (phys_addr_t)DMA_MAPPING_ERROR;
        }

        index = swiotlb_find_slots(dev, orig_addr,
                                   alloc_size + offset, alloc_align_mask);
        if (index == -1) {
                if (!(attrs & DMA_ATTR_NO_WARN))
                        dev_warn_ratelimited(dev,
        "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
                                 alloc_size, mem->nslabs, mem_used(mem));
                return (phys_addr_t)DMA_MAPPING_ERROR;
        }

        /*
         * Save away the mapping from the original address to the DMA address.
         * This is needed when we sync the memory.  Then we sync the buffer if
         * needed.
         */
        for (i = 0; i < nr_slots(alloc_size + offset); i++)
                mem->slots[index + i].orig_addr = slot_addr(orig_addr, i);
        tlb_addr = slot_addr(mem->start, index) + offset;
        /*
         * When dir == DMA_FROM_DEVICE we could omit the copy from the orig
         * to the tlb buffer, if we knew for sure the device will
         * overwrite the entire current content. But we don't. Thus
         * unconditional bounce may prevent leaking swiotlb content (i.e.
         * kernel memory) to user-space.
         */
        swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE);
        return tlb_addr;
}

static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
{
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
        unsigned long flags;
        unsigned int offset = swiotlb_align_offset(dev, tlb_addr);
        int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
        int nslots = nr_slots(mem->slots[index].alloc_size + offset);
        int aindex = index / mem->area_nslabs;
        struct io_tlb_area *area = &mem->areas[aindex];
        int count, i;

        /*
         * Return the buffer to the free list by setting the corresponding
         * entries to indicate the number of contiguous entries available.
         * While returning the entries to the free list, we merge the entries
         * with slots below and above the pool being returned.
         */
        BUG_ON(aindex >= mem->nareas);

        spin_lock_irqsave(&area->lock, flags);
        if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE))
                count = mem->slots[index + nslots].list;
        else
                count = 0;

        /*
         * Step 1: return the slots to the free list, merging the slots with
         * succeeding slots
         */
        for (i = index + nslots - 1; i >= index; i--) {
                mem->slots[i].list = ++count;
                mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
                mem->slots[i].alloc_size = 0;
        }

        /*
         * Step 2: merge the returned slots with the preceding slots, if
         * available (non-zero)
         */
        for (i = index - 1;
             io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list;
             i--)
                mem->slots[i].list = ++count;
        area->used -= nslots;
        spin_unlock_irqrestore(&area->lock, flags);

        dec_used(mem, nslots);
}

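/*
 * Example of the free-list counters: freeing slots [4..6] when slot 7 is
 * free with list == 1 walks i = 6, 5, 4 and sets list to 2, 3, 4; step 2
 * then keeps counting into any free slots below, so every free slot
 * records how many contiguous free slots follow it within its
 * IO_TLB_SEGSIZE segment.
 */
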
/*
 * tlb_addr is the physical address of the bounce buffer to unmap.
 */
void swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t tlb_addr,
                              size_t mapping_size, enum dma_data_direction dir,
                              unsigned long attrs)
{
        /*
         * First, sync the memory before unmapping the entry
         */
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
            (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
                swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_FROM_DEVICE);

        swiotlb_release_slots(dev, tlb_addr);
}

void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
                size_t size, enum dma_data_direction dir)
{
        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
                swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE);
        else
                BUG_ON(dir != DMA_FROM_DEVICE);
}

void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
                size_t size, enum dma_data_direction dir)
{
        if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
                swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE);
        else
                BUG_ON(dir != DMA_TO_DEVICE);
}

/*
 * Create a swiotlb mapping for the buffer at @paddr, and in case of DMAing
 * to the device copy the data into it as well.
 */
dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        phys_addr_t swiotlb_addr;
        dma_addr_t dma_addr;

        trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size);

        swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, 0, dir,
                        attrs);
        if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
                return DMA_MAPPING_ERROR;

        /* Ensure that the address returned is DMA'ble */
        dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr);
        if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
                swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, dir,
                        attrs | DMA_ATTR_SKIP_CPU_SYNC);
                dev_WARN_ONCE(dev, 1,
                        "swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
                        &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
                return DMA_MAPPING_ERROR;
        }

        if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                arch_sync_dma_for_device(swiotlb_addr, size, dir);
        return dma_addr;
}

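/*
 * Typical use is from the dma-direct code rather than from drivers; a
 * caller might look like this sketch (condensed from the real call sites):
 *
 *	if (is_swiotlb_force_bounce(dev) ||
 *	    unlikely(!dma_capable(dev, dma_addr, size, true)))
 *		return swiotlb_map(dev, phys, size, dir, attrs);
 */
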
size_t swiotlb_max_mapping_size(struct device *dev)
{
        int min_align_mask = dma_get_min_align_mask(dev);
        int min_align = 0;

        /*
         * swiotlb_find_slots() skips slots according to
         * min align mask. This affects max mapping size.
         * Take it into account here.
         */
        if (min_align_mask)
                min_align = roundup(min_align_mask, IO_TLB_SIZE);

        return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE - min_align;
}

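/*
 * With the usual IO_TLB_SIZE == 2048 and IO_TLB_SEGSIZE == 128 this is
 * 256 KiB; a device with a 4 KiB min_align_mask loses
 * roundup(0xfff, 2048) == 4096 bytes of that, leaving 252 KiB.
 */
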
bool is_swiotlb_active(struct device *dev)
{
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

        return mem && mem->nslabs;
}
EXPORT_SYMBOL_GPL(is_swiotlb_active);

#ifdef CONFIG_DEBUG_FS

static int io_tlb_used_get(void *data, u64 *val)
{
        struct io_tlb_mem *mem = data;

        *val = mem_used(mem);
        return 0;
}

static int io_tlb_hiwater_get(void *data, u64 *val)
{
        struct io_tlb_mem *mem = data;

        *val = atomic_long_read(&mem->used_hiwater);
        return 0;
}

static int io_tlb_hiwater_set(void *data, u64 val)
{
        struct io_tlb_mem *mem = data;

        /* Only allow setting to zero */
        if (val != 0)
                return -EINVAL;

        atomic_long_set(&mem->used_hiwater, val);
        return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_used, io_tlb_used_get, NULL, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_hiwater, io_tlb_hiwater_get,
                         io_tlb_hiwater_set, "%llu\n");

static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
                                         const char *dirname)
{
        atomic_long_set(&mem->total_used, 0);
        atomic_long_set(&mem->used_hiwater, 0);

        mem->debugfs = debugfs_create_dir(dirname, io_tlb_default_mem.debugfs);
        if (!mem->nslabs)
                return;

        debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs);
        debugfs_create_file("io_tlb_used", 0400, mem->debugfs, mem,
                        &fops_io_tlb_used);
        debugfs_create_file("io_tlb_used_hiwater", 0600, mem->debugfs, mem,
                        &fops_io_tlb_hiwater);
}

static int __init swiotlb_create_default_debugfs(void)
{
        swiotlb_create_debugfs_files(&io_tlb_default_mem, "swiotlb");
        return 0;
}

late_initcall(swiotlb_create_default_debugfs);

#else /* !CONFIG_DEBUG_FS */

static inline void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
                                                const char *dirname)
{
}

#endif /* CONFIG_DEBUG_FS */

#ifdef CONFIG_DMA_RESTRICTED_POOL

struct page *swiotlb_alloc(struct device *dev, size_t size)
{
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
        phys_addr_t tlb_addr;
        int index;

        if (!mem)
                return NULL;

        index = swiotlb_find_slots(dev, 0, size, 0);
        if (index == -1)
                return NULL;

        tlb_addr = slot_addr(mem->start, index);

        return pfn_to_page(PFN_DOWN(tlb_addr));
}

bool swiotlb_free(struct device *dev, struct page *page, size_t size)
{
        phys_addr_t tlb_addr = page_to_phys(page);

        if (!is_swiotlb_buffer(dev, tlb_addr))
                return false;

        swiotlb_release_slots(dev, tlb_addr);

        return true;
}

static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
                                    struct device *dev)
{
        struct io_tlb_mem *mem = rmem->priv;
        unsigned long nslabs = rmem->size >> IO_TLB_SHIFT;

        /* Set the per-device io tlb area count to one */
        unsigned int nareas = 1;

        if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
                dev_err(dev, "Restricted DMA pool must be accessible within the linear mapping.");
                return -EINVAL;
        }

        /*
         * Since multiple devices can share the same pool, the private data,
         * io_tlb_mem struct, will be initialized by the first device attached
         * to it.
         */
        if (!mem) {
                mem = kzalloc(sizeof(*mem), GFP_KERNEL);
                if (!mem)
                        return -ENOMEM;

                mem->slots = kcalloc(nslabs, sizeof(*mem->slots), GFP_KERNEL);
                if (!mem->slots) {
                        kfree(mem);
                        return -ENOMEM;
                }

                mem->areas = kcalloc(nareas, sizeof(*mem->areas),
                                GFP_KERNEL);
                if (!mem->areas) {
                        kfree(mem->slots);
                        kfree(mem);
                        return -ENOMEM;
                }

                set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
                                     rmem->size >> PAGE_SHIFT);
                swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, SWIOTLB_FORCE,
                                        false, nareas);
                mem->for_alloc = true;

                rmem->priv = mem;

                swiotlb_create_debugfs_files(mem, rmem->name);
        }

        dev->dma_io_tlb_mem = mem;

        return 0;
}

static void rmem_swiotlb_device_release(struct reserved_mem *rmem,
                                        struct device *dev)
{
        dev->dma_io_tlb_mem = &io_tlb_default_mem;
}

static const struct reserved_mem_ops rmem_swiotlb_ops = {
        .device_init = rmem_swiotlb_device_init,
        .device_release = rmem_swiotlb_device_release,
};

static int __init rmem_swiotlb_setup(struct reserved_mem *rmem)
{
        unsigned long node = rmem->fdt_node;

        if (of_get_flat_dt_prop(node, "reusable", NULL) ||
            of_get_flat_dt_prop(node, "linux,cma-default", NULL) ||
            of_get_flat_dt_prop(node, "linux,dma-default", NULL) ||
            of_get_flat_dt_prop(node, "no-map", NULL))
                return -EINVAL;

        rmem->ops = &rmem_swiotlb_ops;
        pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n",
                &rmem->base, (unsigned long)rmem->size / SZ_1M);
        return 0;
}

RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);
#endif /* CONFIG_DMA_RESTRICTED_POOL */