/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
/*
 * DMA Coherent API Notes
 *
 * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
 * implemented by accessing it using a kernel virtual address, with
 * Cache bit off in the TLB entry.
 *
 * The default DMA address == Phy address which is 0x8000_0000 based.
 */
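
/*
 * A minimal usage sketch (hypothetical driver code, not part of this file):
 * dma_alloc_coherent() is the generic entry point that lands in
 * arch_dma_alloc() below.
 *
 *	void *buf;
 *	dma_addr_t handle;
 *
 *	buf = dma_alloc_coherent(dev, SZ_4K, &handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	// hand 'handle' to the device; CPU accesses 'buf' uncached
 *	dma_free_coherent(dev, SZ_4K, buf, handle);
 */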
#include <linux/dma-noncoherent.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned long order = get_order(size);
	struct page *page;
	phys_addr_t paddr;
	void *kvaddr;
	int need_coh = 1, need_kvaddr = 0;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;
	/*
	 * IOC relies on all data (even coherent DMA data) being in cache
	 * Thus allocate normal cached memory
	 *
	 * The gains with IOC are two pronged:
	 *   -For streaming data, elides need for cache maintenance, saving
	 *    cycles in flush code and bus bandwidth, as the lines of a
	 *    buffer need not be flushed out to memory
	 *   -For coherent data, Read/Write to buffers terminate early in cache
	 *    (vs. always going to memory - thus are faster)
	 */
	if ((is_isa_arcv2() && ioc_enable) ||
	    (attrs & DMA_ATTR_NON_CONSISTENT))
		need_coh = 0;
	/*
	 * - A coherent buffer needs MMU mapping to enforce non-cacheability
	 * - A highmem page needs a virtual handle (hence MMU mapping)
	 *   independent of cacheability
	 */
	if (PageHighMem(page) || need_coh)
		need_kvaddr = 1;
	/* This is linear addr (0x8000_0000 based) */
	paddr = page_to_phys(page);

	*dma_handle = paddr;

	/* This is kernel Virtual address (0x7000_0000 based) */
	if (need_kvaddr) {
		kvaddr = ioremap_nocache(paddr, size);
		if (kvaddr == NULL) {
			__free_pages(page, order);
			return NULL;
		}
	} else {
		kvaddr = (void *)(u32)paddr;
	}
	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
	 * in case it was used earlier as a normal "cached" page.
	 * Yeah this bit us - STAR 9000898266
	 *
	 * Although the core does call flush_cache_vmap(), it gets kvaddr,
	 * hence can't be used to efficiently flush L1 and/or L2 which need
	 * paddr. Currently flush_cache_vmap nukes the L1 cache completely,
	 * which will be optimized as a separate commit.
	 */
	if (need_coh)
		dma_cache_wback_inv(paddr, size);

	return kvaddr;
}
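
/*
 * A minimal sketch (hypothetical call site) of the DMA_ATTR_NON_CONSISTENT
 * path handled above, which hands back cached memory and leaves cache
 * maintenance to the caller:
 *
 *	buf = dma_alloc_attrs(dev, size, &handle, GFP_KERNEL,
 *			      DMA_ATTR_NON_CONSISTENT);
 */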
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	phys_addr_t paddr = dma_handle;
	struct page *page = virt_to_page(paddr);
	int is_non_coh;

	is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
			(is_isa_arcv2() && ioc_enable);

	/* unmap only if alloc had created an uncached/highmem mapping */
	if (PageHighMem(page) || !is_non_coh)
		iounmap((void __force __iomem *)vaddr);

	__free_pages(page, get_order(size));
}
int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = __phys_to_pfn(dma_addr);
	unsigned long off = vma->vm_pgoff;
	int ret = -ENXIO;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < count && user_count <= (count - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      user_count << PAGE_SHIFT,
				      vma->vm_page_prot);
	}

	return ret;
}
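
/*
 * A minimal sketch (hypothetical driver ->mmap handler) of how the above is
 * reached: dma_mmap_coherent() is the generic wrapper that routes here.
 * 'buf' and 'handle' are assumed to come from an earlier dma_alloc_coherent().
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(dev, vma, buf, handle, SZ_4K);
 *	}
 */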
/*
 * Cache operations depending on function and direction argument, inspired by
 * https://lkml.org/lkml/2018/5/18/979
 * "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
 * dma-mapping: provide a generic dma-noncoherent implementation)"
 *
 *          |   map          ==  for_device     |   unmap     ==  for_cpu
 *          |----------------------------------------------------------------
 * TO_DEV   |   writeback        writeback      |   none          none
 * FROM_DEV |   invalidate       invalidate     |   invalidate*   invalidate*
 * BIDIR    |   writeback+inv    writeback+inv  |   invalidate    invalidate
 *
 *     [*] needed for CPU speculative prefetches
 *
 * NOTE: we don't check the validity of the direction argument as that is
 * done in upper layer functions (in include/linux/dma-mapping.h)
 */
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;

	default:
		break;
	}
}
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;

	/* FROM_DEVICE invalidate needed only to guard against speculative
	 * CPU prefetches having pulled in stale lines */
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		dma_cache_inv(paddr, size);
		break;

	default:
		break;
	}
}
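
/*
 * A minimal sketch (hypothetical buffer/length) of the streaming-API calls
 * that funnel into the two sync hooks above:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *		// -> arch_sync_dma_for_device(): dma_cache_inv()
 *	// ... device DMAs into buf ...
 *	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
 *		// -> arch_sync_dma_for_cpu(): dma_cache_inv() again, per the
 *		//    table's [*] note on speculative prefetches
 */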