// SPDX-License-Identifier: GPL-2.0-only
/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 * 05/09/10 linville	Add support for syncing ranges, support syncing for
 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 * 08/12/11 beckyb	Add highmem support
 */

#define pr_fmt(fmt) "software IO TLB: " fmt
#include <linux/cache.h>
#include <linux/cc_platform.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/iommu-helper.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/scatterlist.h>
#include <linux/set_memory.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/types.h>
#ifdef CONFIG_DMA_RESTRICTED_POOL
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
#include <linux/slab.h>
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/swiotlb.h>

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
/*
 * Minimum IO TLB size to bother booting with. Systems with mainly
 * 64bit capable cards will only lightly use the swiotlb. If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
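
/*
 * A worked example of the slab arithmetic, assuming the usual
 * IO_TLB_SHIFT of 11 from <linux/swiotlb.h>: each slab (slot) is
 * IO_TLB_SIZE = 2 KiB, so IO_TLB_MIN_SLABS = (1 MiB >> 11) = 512 slots,
 * and the 64 MiB IO_TLB_DEFAULT_SIZE corresponds to 32768 slots.
 */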
#define INVALID_PHYS_ADDR (~(phys_addr_t)0)

struct io_tlb_slot {
	phys_addr_t orig_addr;
	size_t alloc_size;
	unsigned int list;
};

static bool swiotlb_force_bounce;
static bool swiotlb_force_disable;

struct io_tlb_mem io_tlb_default_mem;

phys_addr_t swiotlb_unencrypted_base;

static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;
static unsigned long default_nareas;
/**
 * struct io_tlb_area - IO TLB memory area descriptor
 *
 * This is a single area with a single lock.
 *
 * @used: The number of used IO TLB blocks.
 * @index: The slot index to start searching in this area for next round.
 * @lock: The lock to protect the above data structures in the map and
 *	unmap calls.
 */
struct io_tlb_area {
	unsigned long used;
	unsigned int index;
	spinlock_t lock;
};
/*
 * Round up the number of slabs to the next power of 2. The last area is
 * going to be smaller than the rest if default_nslabs is not a power of two.
 * The number of slots in an area should be a multiple of IO_TLB_SEGSIZE,
 * otherwise a segment may span two or more areas. This conflicts with the
 * tracking of free contiguous slots: free slots are treated as contiguous
 * no matter whether they cross an area boundary.
 *
 * Return true if default_nslabs is rounded up.
 */
static bool round_up_default_nslabs(void)
{
	if (!default_nareas)
		return false;

	if (default_nslabs < IO_TLB_SEGSIZE * default_nareas)
		default_nslabs = IO_TLB_SEGSIZE * default_nareas;
	else if (is_power_of_2(default_nslabs))
		return false;
	default_nslabs = roundup_pow_of_two(default_nslabs);
	return true;
}
static void swiotlb_adjust_nareas(unsigned int nareas)
{
	/* use a single area when none is specified */
	if (!nareas)
		nareas = 1;
	else if (!is_power_of_2(nareas))
		nareas = roundup_pow_of_two(nareas);

	default_nareas = nareas;

	pr_info("area num %d.\n", nareas);
	if (round_up_default_nslabs())
		pr_info("SWIOTLB bounce buffer size rounded up to %luMB",
			(default_nslabs << IO_TLB_SHIFT) >> 20);
}
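
/*
 * For example, booting with six possible CPUs rounds nareas up to eight;
 * the default 32768-slot (64 MiB) pool already satisfies the
 * 8 * IO_TLB_SEGSIZE minimum and is a power of two, so default_nslabs is
 * left untouched in that case.
 */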
static int __init
setup_io_tlb_npages(char *str)
{
	if (isdigit(*str)) {
		/* avoid tail segment of size < IO_TLB_SEGSIZE */
		default_nslabs =
			ALIGN(simple_strtoul(str, &str, 0), IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (isdigit(*str))
		swiotlb_adjust_nareas(simple_strtoul(str, &str, 0));
	if (*str == ',')
		++str;
	if (!strcmp(str, "force"))
		swiotlb_force_bounce = true;
	else if (!strcmp(str, "noforce"))
		swiotlb_force_disable = true;

	return 0;
}
early_param("swiotlb", setup_io_tlb_npages);
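
/*
 * The accepted command-line syntax is therefore
 * "swiotlb=<slabs>[,<areas>][,force|noforce]". For instance, a purely
 * illustrative "swiotlb=65536,4,force" boot option would reserve a
 * 128 MiB bounce buffer split into four areas and force bouncing even
 * for devices that could address all of memory.
 */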
unsigned long swiotlb_size_or_default(void)
{
	return default_nslabs << IO_TLB_SHIFT;
}
void __init swiotlb_adjust_size(unsigned long size)
{
	/*
	 * If the swiotlb parameter has not been specified, give a chance to
	 * architectures such as those supporting memory encryption to
	 * adjust/expand SWIOTLB size for their use.
	 */
	if (default_nslabs != IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT)
		return;

	size = ALIGN(size, IO_TLB_SIZE);
	default_nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
	if (round_up_default_nslabs())
		size = default_nslabs << IO_TLB_SHIFT;
	pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20);
}
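
/*
 * A minimal sketch of an architecture-side caller, assuming an early boot
 * hook and the roughly-6%-of-memory heuristic used by x86 memory
 * encryption setup; the hook name arch_mem_encrypt_setup() below is
 * hypothetical:
 *
 *	static void __init arch_mem_encrypt_setup(void)
 *	{
 *		unsigned long size = memblock_phys_mem_size() * 6 / 100;
 *
 *		swiotlb_adjust_size(clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G));
 *	}
 */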
void swiotlb_print_info(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;

	if (!mem->nslabs) {
		pr_warn("No low mem\n");
		return;
	}

	pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end,
	       (mem->nslabs << IO_TLB_SHIFT) >> 20);
}
static inline unsigned long io_tlb_offset(unsigned long val)
{
	return val & (IO_TLB_SEGSIZE - 1);
}
static inline unsigned long nr_slots(u64 val)
{
	return DIV_ROUND_UP(val, IO_TLB_SIZE);
}
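
/*
 * Worked example, again assuming 2 KiB slots: a 5000-byte request needs
 * nr_slots(5000) = 3 slots, and slot 130 sits at io_tlb_offset(130) = 2
 * within its 128-slot segment.
 */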
/*
 * Remap swiotlb memory in the unencrypted physical address space
 * when swiotlb_unencrypted_base is set. (e.g. for Hyper-V AMD SEV-SNP
 * Isolation VMs).
 */
#ifdef CONFIG_HAS_IOMEM
static void *swiotlb_mem_remap(struct io_tlb_mem *mem, unsigned long bytes)
{
	void *vaddr = NULL;

	if (swiotlb_unencrypted_base) {
		phys_addr_t paddr = mem->start + swiotlb_unencrypted_base;

		vaddr = memremap(paddr, bytes, MEMREMAP_WB);
		if (!vaddr)
			pr_err("Failed to map the unencrypted memory %pa size %lx.\n",
			       &paddr, bytes);
	}

	return vaddr;
}
#else
static void *swiotlb_mem_remap(struct io_tlb_mem *mem, unsigned long bytes)
{
	return NULL;
}
#endif
/*
 * Early SWIOTLB allocation may be too early to allow an architecture to
 * perform the desired operations. This function allows the architecture to
 * call SWIOTLB when the operations are possible. It needs to be called
 * before the SWIOTLB memory is used.
 */
void __init swiotlb_update_mem_attributes(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	void *vaddr;
	unsigned long bytes;

	if (!mem->nslabs || mem->late_alloc)
		return;
	vaddr = phys_to_virt(mem->start);
	bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT);
	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);

	mem->vaddr = swiotlb_mem_remap(mem, bytes);
	if (!mem->vaddr)
		mem->vaddr = vaddr;
}
static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
		unsigned long nslabs, unsigned int flags,
		bool late_alloc, unsigned int nareas)
{
	void *vaddr = phys_to_virt(start);
	unsigned long bytes = nslabs << IO_TLB_SHIFT, i;

	mem->nslabs = nslabs;
	mem->start = start;
	mem->end = mem->start + bytes;
	mem->late_alloc = late_alloc;
	mem->nareas = nareas;
	mem->area_nslabs = nslabs / mem->nareas;

	mem->force_bounce = swiotlb_force_bounce || (flags & SWIOTLB_FORCE);

	for (i = 0; i < mem->nareas; i++) {
		spin_lock_init(&mem->areas[i].lock);
		mem->areas[i].index = 0;
		mem->areas[i].used = 0;
	}

	for (i = 0; i < mem->nslabs; i++) {
		mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
		mem->slots[i].alloc_size = 0;
	}

	/*
	 * If swiotlb_unencrypted_base is set, the bounce buffer memory will
	 * be remapped and cleared in swiotlb_update_mem_attributes.
	 */
	if (swiotlb_unencrypted_base)
		return;

	memset(vaddr, 0, bytes);
	mem->vaddr = vaddr;
}
static void __init *swiotlb_memblock_alloc(unsigned long nslabs,
		unsigned int flags,
		int (*remap)(void *tlb, unsigned long nslabs))
{
	size_t bytes = PAGE_ALIGN(nslabs << IO_TLB_SHIFT);
	void *tlb;

	/*
	 * By default allocate the bounce buffer memory from low memory, but
	 * allow picking a location anywhere for hypervisors with guest
	 * memory encryption.
	 */
	if (flags & SWIOTLB_ANY)
		tlb = memblock_alloc(bytes, PAGE_SIZE);
	else
		tlb = memblock_alloc_low(bytes, PAGE_SIZE);

	if (!tlb) {
		pr_warn("%s: Failed to allocate %zu bytes tlb structure\n",
			__func__, bytes);
		return NULL;
	}

	if (remap && remap(tlb, nslabs) < 0) {
		memblock_free(tlb, PAGE_ALIGN(bytes));
		pr_warn("%s: Failed to remap %zu bytes\n", __func__, bytes);
		return NULL;
	}

	return tlb;
}
/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
		int (*remap)(void *tlb, unsigned long nslabs))
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	unsigned long nslabs;
	size_t alloc_size;
	void *tlb;

	if (!addressing_limit && !swiotlb_force_bounce)
		return;
	if (swiotlb_force_disable)
		return;

	/*
	 * default_nslabs may change when the area count is adjusted, so
	 * allocate the bounce buffer only after adjusting the area count.
	 */
	if (!default_nareas)
		swiotlb_adjust_nareas(num_possible_cpus());

	nslabs = default_nslabs;
	while ((tlb = swiotlb_memblock_alloc(nslabs, flags, remap)) == NULL) {
		if (nslabs <= IO_TLB_MIN_SLABS)
			return;
		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
	}

	if (default_nslabs != nslabs) {
		pr_info("SWIOTLB bounce buffer size adjusted %lu -> %lu slabs",
			default_nslabs, nslabs);
		default_nslabs = nslabs;
	}

	alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
	mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
	if (!mem->slots) {
		pr_warn("%s: Failed to allocate %zu bytes align=0x%lx\n",
			__func__, alloc_size, PAGE_SIZE);
		return;
	}

	mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area),
		default_nareas), SMP_CACHE_BYTES);
	if (!mem->areas) {
		pr_warn("%s: Failed to allocate mem->areas.\n", __func__);
		return;
	}

	swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, flags, false,
				default_nareas);

	if (flags & SWIOTLB_VERBOSE)
		swiotlb_print_info();
}
void __init swiotlb_init(bool addressing_limit, unsigned int flags)
{
	swiotlb_init_remap(addressing_limit, flags, NULL);
}
/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int swiotlb_init_late(size_t size, gfp_t gfp_mask,
		int (*remap)(void *tlb, unsigned long nslabs))
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
	unsigned char *vstart = NULL;
	unsigned int order, area_order;
	bool retried = false;
	int rc = 0;

	if (swiotlb_force_disable)
		return 0;

retry:
	order = get_order(nslabs << IO_TLB_SHIFT);
	nslabs = SLABS_PER_PAGE << order;

	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		vstart = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
						  order);
		if (vstart)
			break;
		order--;
		nslabs = SLABS_PER_PAGE << order;
		retried = true;
	}

	if (!vstart)
		return -ENOMEM;

	if (remap)
		rc = remap(vstart, nslabs);
	if (rc) {
		free_pages((unsigned long)vstart, order);

		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
		if (nslabs < IO_TLB_MIN_SLABS)
			return rc;
		retried = true;
		goto retry;
	}

	if (retried) {
		pr_warn("only able to allocate %ld MB\n",
			(PAGE_SIZE << order) >> 20);
	}

	if (!default_nareas)
		swiotlb_adjust_nareas(num_possible_cpus());

	area_order = get_order(array_size(sizeof(*mem->areas),
		default_nareas));
	mem->areas = (struct io_tlb_area *)
		__get_free_pages(GFP_KERNEL | __GFP_ZERO, area_order);
	if (!mem->areas)
		goto error_area;

	mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
		get_order(array_size(sizeof(*mem->slots), nslabs)));
	if (!mem->slots)
		goto error_slots;

	set_memory_decrypted((unsigned long)vstart,
			     (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
	swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, 0, true,
				default_nareas);

	swiotlb_print_info();
	return 0;

error_slots:
	free_pages((unsigned long)mem->areas, area_order);
error_area:
	free_pages((unsigned long)vstart, order);
	return -ENOMEM;
}
void __init swiotlb_exit(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	unsigned long tbl_vaddr;
	size_t tbl_size, slots_size;
	unsigned int area_order;

	if (swiotlb_force_bounce)
		return;

	if (!mem->nslabs)
		return;

	pr_info("tearing down default memory pool\n");
	tbl_vaddr = (unsigned long)phys_to_virt(mem->start);
	tbl_size = PAGE_ALIGN(mem->end - mem->start);
	slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs));

	set_memory_encrypted(tbl_vaddr, tbl_size >> PAGE_SHIFT);
	if (mem->late_alloc) {
		area_order = get_order(array_size(sizeof(*mem->areas),
			mem->nareas));
		free_pages((unsigned long)mem->areas, area_order);
		free_pages(tbl_vaddr, get_order(tbl_size));
		free_pages((unsigned long)mem->slots, get_order(slots_size));
	} else {
		memblock_free_late(__pa(mem->areas),
			array_size(sizeof(*mem->areas), mem->nareas));
		memblock_free_late(mem->start, tbl_size);
		memblock_free_late(__pa(mem->slots), slots_size);
	}

	memset(mem, 0, sizeof(*mem));
}
/*
 * Return the offset into an iotlb slot required to keep the device happy.
 */
static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
{
	return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
}
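
/*
 * For example, a device with a 4 KiB min_align_mask (0xfff) mapping a
 * buffer whose physical address ends in 0x5234 gets an offset of
 * 0x234 (0x5234 & 0xfff & 0x7ff), i.e. the data is placed 0x234 bytes
 * into its first 2 KiB slot so the low address bits the device relies
 * on are preserved.
 */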
/*
 * Bounce: copy the swiotlb buffer from or back to the original dma location
 */
static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size,
			   enum dma_data_direction dir)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT;
	phys_addr_t orig_addr = mem->slots[index].orig_addr;
	size_t alloc_size = mem->slots[index].alloc_size;
	unsigned long pfn = PFN_DOWN(orig_addr);
	unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start;
	unsigned int tlb_offset, orig_addr_offset;

	if (orig_addr == INVALID_PHYS_ADDR)
		return;

	tlb_offset = tlb_addr & (IO_TLB_SIZE - 1);
	orig_addr_offset = swiotlb_align_offset(dev, orig_addr);
	if (tlb_offset < orig_addr_offset) {
		dev_WARN_ONCE(dev, 1,
			"Access before mapping start detected. orig offset %u, requested offset %u.\n",
			orig_addr_offset, tlb_offset);
		return;
	}

	tlb_offset -= orig_addr_offset;
	if (tlb_offset > alloc_size) {
		dev_WARN_ONCE(dev, 1,
			"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu+%u.\n",
			alloc_size, size, tlb_offset);
		return;
	}

	orig_addr += tlb_offset;
	alloc_size -= tlb_offset;

	if (size > alloc_size) {
		dev_WARN_ONCE(dev, 1,
			"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n",
			alloc_size, size);
		size = alloc_size;
	}

	if (PageHighMem(pfn_to_page(pfn))) {
		unsigned int offset = orig_addr & ~PAGE_MASK;
		struct page *page;
		unsigned int sz = 0;
		unsigned long flags;

		while (size) {
			sz = min_t(size_t, PAGE_SIZE - offset, size);

			local_irq_save(flags);
			page = pfn_to_page(pfn);
			if (dir == DMA_TO_DEVICE)
				memcpy_from_page(vaddr, page, offset, sz);
			else
				memcpy_to_page(page, offset, vaddr, sz);
			local_irq_restore(flags);

			size -= sz;
			pfn++;
			vaddr += sz;
			offset = 0;
		}
	} else if (dir == DMA_TO_DEVICE) {
		memcpy(vaddr, phys_to_virt(orig_addr), size);
	} else {
		memcpy(phys_to_virt(orig_addr), vaddr, size);
	}
}
static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx)
{
	return start + (idx << IO_TLB_SHIFT);
}
/*
 * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
 */
static inline unsigned long get_max_slots(unsigned long boundary_mask)
{
	if (boundary_mask == ~0UL)
		return 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
	return nr_slots(boundary_mask + 1);
}
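
/*
 * E.g. a device with a 4 GiB segment boundary (boundary_mask 0xffffffff)
 * yields nr_slots(0x100000000) = 2097152 slots, while an unlimited
 * boundary of ~0UL would overflow the addition and is special-cased above.
 */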
static unsigned int wrap_area_index(struct io_tlb_mem *mem, unsigned int index)
{
	if (index >= mem->area_nslabs)
		return 0;
	return index;
}
/*
 * Find a suitable number of IO TLB entries size that will fit this request and
 * allocate a buffer from that IO TLB pool.
 */
static int swiotlb_do_find_slots(struct device *dev, int area_index,
		phys_addr_t orig_addr, size_t alloc_size,
		unsigned int alloc_align_mask)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	struct io_tlb_area *area = mem->areas + area_index;
	unsigned long boundary_mask = dma_get_seg_boundary(dev);
	dma_addr_t tbl_dma_addr =
		phys_to_dma_unencrypted(dev, mem->start) & boundary_mask;
	unsigned long max_slots = get_max_slots(boundary_mask);
	unsigned int iotlb_align_mask =
		dma_get_min_align_mask(dev) | alloc_align_mask;
	unsigned int nslots = nr_slots(alloc_size), stride;
	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
	unsigned int index, slots_checked, count = 0, i;
	unsigned long flags;
	unsigned int slot_base;
	unsigned int slot_index;

	BUG_ON(!nslots);
	BUG_ON(area_index >= mem->nareas);

	/*
	 * For allocations of PAGE_SIZE or larger only look for page aligned
	 * allocations.
	 */
	if (alloc_size >= PAGE_SIZE)
		iotlb_align_mask |= ~PAGE_MASK;
	iotlb_align_mask &= ~(IO_TLB_SIZE - 1);

	/*
	 * For mappings with an alignment requirement don't bother looping to
	 * unaligned slots once we found an aligned one.
	 */
	stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;

	spin_lock_irqsave(&area->lock, flags);
	if (unlikely(nslots > mem->area_nslabs - area->used))
		goto not_found;

	slot_base = area_index * mem->area_nslabs;
	index = area->index;

	for (slots_checked = 0; slots_checked < mem->area_nslabs; ) {
		slot_index = slot_base + index;

		if (orig_addr &&
		    (slot_addr(tbl_dma_addr, slot_index) &
		     iotlb_align_mask) != (orig_addr & iotlb_align_mask)) {
			index = wrap_area_index(mem, index + 1);
			slots_checked++;
			continue;
		}

		/*
		 * If we find a slot that indicates we have 'nslots' number of
		 * contiguous buffers, we allocate the buffers from that slot
		 * and mark the entries as '0' indicating unavailable.
		 */
		if (!iommu_is_span_boundary(slot_index, nslots,
					    nr_slots(tbl_dma_addr),
					    max_slots)) {
			if (mem->slots[slot_index].list >= nslots)
				goto found;
		}
		index = wrap_area_index(mem, index + stride);
		slots_checked += stride;
	}

not_found:
	spin_unlock_irqrestore(&area->lock, flags);
	return -1;

found:
	for (i = slot_index; i < slot_index + nslots; i++) {
		mem->slots[i].list = 0;
		mem->slots[i].alloc_size = alloc_size - (offset +
				((i - slot_index) << IO_TLB_SHIFT));
	}
	for (i = slot_index - 1;
	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 &&
	     mem->slots[i].list; i--)
		mem->slots[i].list = ++count;

	/*
	 * Update the indices to avoid searching in the next round.
	 */
	area->index = wrap_area_index(mem, index + nslots);
	area->used += nslots;
	spin_unlock_irqrestore(&area->lock, flags);

	return slot_index;
}
static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
		size_t alloc_size, unsigned int alloc_align_mask)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	int start = raw_smp_processor_id() & (mem->nareas - 1);
	int i = start, index;

	do {
		index = swiotlb_do_find_slots(dev, i, orig_addr, alloc_size,
					      alloc_align_mask);
		if (index >= 0)
			return index;
		if (++i >= mem->nareas)
			i = 0;
	} while (i != start);

	return -1;
}
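
/*
 * The starting area is derived from the CPU, so concurrent mappings
 * spread across areas: e.g. with mem->nareas == 4, CPU 5 starts its
 * search in area 5 & 3 = 1 and only falls through to areas 2, 3 and 0
 * if that area is exhausted.
 */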
static unsigned long mem_used(struct io_tlb_mem *mem)
{
	int i;
	unsigned long used = 0;

	for (i = 0; i < mem->nareas; i++)
		used += mem->areas[i].used;
	return used;
}
phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
		size_t mapping_size, size_t alloc_size,
		unsigned int alloc_align_mask, enum dma_data_direction dir,
		unsigned long attrs)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
	unsigned int i;
	int index;
	phys_addr_t tlb_addr;

	if (!mem || !mem->nslabs) {
		dev_warn_ratelimited(dev,
			"Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
		return (phys_addr_t)DMA_MAPPING_ERROR;
	}

	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");

	if (mapping_size > alloc_size) {
		dev_warn_once(dev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
			      mapping_size, alloc_size);
		return (phys_addr_t)DMA_MAPPING_ERROR;
	}

	index = swiotlb_find_slots(dev, orig_addr,
				   alloc_size + offset, alloc_align_mask);
	if (index == -1) {
		if (!(attrs & DMA_ATTR_NO_WARN))
			dev_warn_ratelimited(dev,
	"swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
				 alloc_size, mem->nslabs, mem_used(mem));
		return (phys_addr_t)DMA_MAPPING_ERROR;
	}

	/*
	 * Save away the mapping from the original address to the DMA address.
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	for (i = 0; i < nr_slots(alloc_size + offset); i++)
		mem->slots[index + i].orig_addr = slot_addr(orig_addr, i);
	tlb_addr = slot_addr(mem->start, index) + offset;
	/*
	 * When dir == DMA_FROM_DEVICE we could omit the copy from the orig
	 * to the tlb buffer, if we knew for sure the device will
	 * overwrite the entire current content. But we don't. Thus
	 * unconditional bounce may prevent leaking swiotlb content (i.e.
	 * kernel memory) to user-space.
	 */
	swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE);
	return tlb_addr;
}
static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	unsigned long flags;
	unsigned int offset = swiotlb_align_offset(dev, tlb_addr);
	int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
	int nslots = nr_slots(mem->slots[index].alloc_size + offset);
	int aindex = index / mem->area_nslabs;
	struct io_tlb_area *area = &mem->areas[aindex];
	int count, i;

	/*
	 * Return the buffer to the free list by setting the corresponding
	 * entries to indicate the number of contiguous entries available.
	 * While returning the entries to the free list, we merge the entries
	 * with slots below and above the pool being returned.
	 */
	BUG_ON(aindex >= mem->nareas);

	spin_lock_irqsave(&area->lock, flags);
	if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE))
		count = mem->slots[index + nslots].list;
	else
		count = 0;

	/*
	 * Step 1: return the slots to the free list, merging the slots with
	 * succeeding slots
	 */
	for (i = index + nslots - 1; i >= index; i--) {
		mem->slots[i].list = ++count;
		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
		mem->slots[i].alloc_size = 0;
	}

	/*
	 * Step 2: merge the returned slots with the preceding slots, if
	 * available (non zero)
	 */
	for (i = index - 1;
	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list;
	     i--)
		mem->slots[i].list = ++count;
	area->used -= nslots;
	spin_unlock_irqrestore(&area->lock, flags);
}
/*
 * tlb_addr is the physical address of the bounce buffer to unmap.
 */
void swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t tlb_addr,
			      size_t mapping_size, enum dma_data_direction dir,
			      unsigned long attrs)
{
	/*
	 * First, sync the memory before unmapping the entry
	 */
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
		swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_FROM_DEVICE);

	swiotlb_release_slots(dev, tlb_addr);
}
void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir)
{
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE);
	else
		BUG_ON(dir != DMA_FROM_DEVICE);
}
void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir)
{
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE);
	else
		BUG_ON(dir != DMA_TO_DEVICE);
}
/*
 * Create a swiotlb mapping for the buffer at @paddr, and in case of DMAing
 * to the device copy the data into it as well.
 */
dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t swiotlb_addr;
	dma_addr_t dma_addr;

	trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size);

	swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, 0, dir,
			attrs);
	if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	/* Ensure that the address returned is DMA'ble */
	dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr);
	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
		swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, dir,
			attrs | DMA_ATTR_SKIP_CPU_SYNC);
		dev_WARN_ONCE(dev, 1,
			"swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			&dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		return DMA_MAPPING_ERROR;
	}

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(swiotlb_addr, size, dir);
	return dma_addr;
}
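
/*
 * Drivers never call this directly. A typical path, sketched here under
 * the assumption of a streaming dma_map_single() for a buffer the device
 * cannot address directly, is:
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *		-> dma_direct_map_page()
 *		-> swiotlb_map()               (buffer bounced, data copied)
 *	...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 *		-> swiotlb_tbl_unmap_single()  (data synced back, slots freed)
 */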
size_t swiotlb_max_mapping_size(struct device *dev)
{
	int min_align_mask = dma_get_min_align_mask(dev);
	int min_align = 0;

	/*
	 * swiotlb_find_slots() skips slots according to
	 * min align mask. This affects max mapping size.
	 * Take it into account here.
	 */
	if (min_align_mask)
		min_align = roundup(min_align_mask, IO_TLB_SIZE);

	return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE - min_align;
}
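
/*
 * With 2 KiB slots and 128-slot segments the default maximum is 256 KiB.
 * A device that sets a 4 KiB - 1 min_align_mask (as NVMe does) loses one
 * rounded-up alignment step: 256 KiB - 4 KiB = 252 KiB per mapping.
 */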
bool is_swiotlb_active(struct device *dev)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

	return mem && mem->nslabs;
}
EXPORT_SYMBOL_GPL(is_swiotlb_active);
static int io_tlb_used_get(void *data, u64 *val)
{
	*val = mem_used(&io_tlb_default_mem);
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_used, io_tlb_used_get, NULL, "%llu\n");

static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
					 const char *dirname)
{
	mem->debugfs = debugfs_create_dir(dirname, io_tlb_default_mem.debugfs);
	if (!mem->nslabs)
		return;

	debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs);
	debugfs_create_file("io_tlb_used", 0400, mem->debugfs, NULL,
			&fops_io_tlb_used);
}

static int __init __maybe_unused swiotlb_create_default_debugfs(void)
{
	swiotlb_create_debugfs_files(&io_tlb_default_mem, "swiotlb");
	return 0;
}
#ifdef CONFIG_DEBUG_FS
late_initcall(swiotlb_create_default_debugfs);
#endif
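
/*
 * With debugfs mounted in the usual place, the default pool can then be
 * inspected from user space, e.g.:
 *
 *	# cat /sys/kernel/debug/swiotlb/io_tlb_nslabs
 *	32768
 *	# cat /sys/kernel/debug/swiotlb/io_tlb_used
 *	0
 *
 * (the values shown above are merely illustrative).
 */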
#ifdef CONFIG_DMA_RESTRICTED_POOL

struct page *swiotlb_alloc(struct device *dev, size_t size)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	phys_addr_t tlb_addr;
	int index;

	if (!mem)
		return NULL;

	index = swiotlb_find_slots(dev, 0, size, 0);
	if (index == -1)
		return NULL;

	tlb_addr = slot_addr(mem->start, index);

	return pfn_to_page(PFN_DOWN(tlb_addr));
}
bool swiotlb_free(struct device *dev, struct page *page, size_t size)
{
	phys_addr_t tlb_addr = page_to_phys(page);

	if (!is_swiotlb_buffer(dev, tlb_addr))
		return false;

	swiotlb_release_slots(dev, tlb_addr);

	return true;
}
static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
				    struct device *dev)
{
	struct io_tlb_mem *mem = rmem->priv;
	unsigned long nslabs = rmem->size >> IO_TLB_SHIFT;

	/* Set the per-device IO TLB area count to one */
	unsigned int nareas = 1;

	/*
	 * Since multiple devices can share the same pool, the private data,
	 * io_tlb_mem struct, will be initialized by the first device attached
	 * to it.
	 */
	if (!mem) {
		mem = kzalloc(sizeof(*mem), GFP_KERNEL);
		if (!mem)
			return -ENOMEM;

		mem->slots = kcalloc(nslabs, sizeof(*mem->slots), GFP_KERNEL);
		if (!mem->slots) {
			kfree(mem);
			return -ENOMEM;
		}

		mem->areas = kcalloc(nareas, sizeof(*mem->areas),
				GFP_KERNEL);
		if (!mem->areas) {
			kfree(mem->slots);
			kfree(mem);
			return -ENOMEM;
		}

		set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
				     rmem->size >> PAGE_SHIFT);
		swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, SWIOTLB_FORCE,
					false, nareas);
		mem->for_alloc = true;

		rmem->priv = mem;

		swiotlb_create_debugfs_files(mem, rmem->name);
	}

	dev->dma_io_tlb_mem = mem;

	return 0;
}
static void rmem_swiotlb_device_release(struct reserved_mem *rmem,
					struct device *dev)
{
	dev->dma_io_tlb_mem = &io_tlb_default_mem;
}

static const struct reserved_mem_ops rmem_swiotlb_ops = {
	.device_init = rmem_swiotlb_device_init,
	.device_release = rmem_swiotlb_device_release,
};
static int __init rmem_swiotlb_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL) ||
	    of_get_flat_dt_prop(node, "linux,cma-default", NULL) ||
	    of_get_flat_dt_prop(node, "linux,dma-default", NULL) ||
	    of_get_flat_dt_prop(node, "no-map", NULL))
		return -EINVAL;

	if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
		pr_err("Restricted DMA pool must be accessible within the linear mapping.");
		return -EINVAL;
	}

	rmem->ops = &rmem_swiotlb_ops;
	pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}

RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);
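
/*
 * A minimal device-tree sketch of such a pool; addresses, sizes and node
 * names are purely illustrative, while the "restricted-dma-pool"
 * compatible comes from the shared-dma-pool reserved-memory binding:
 *
 *	reserved-memory {
 *		#address-cells = <2>;
 *		#size-cells = <2>;
 *		ranges;
 *
 *		restricted_dma: restricted-dma@50000000 {
 *			compatible = "restricted-dma-pool";
 *			reg = <0x0 0x50000000 0x0 0x400000>;
 *		};
 *	};
 *
 *	dma-device@0 {
 *		memory-region = <&restricted_dma>;
 *	};
 */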
#endif /* CONFIG_DMA_RESTRICTED_POOL */