dma-direct: provide mmap and get_sgtable method overrides
[linux-2.6-microblaze.git] / arch/mips/mm/dma-noncoherent.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/dma-coherence.h>
#include <asm/io.h>

/*
 * The affected CPUs below in 'cpu_needs_post_dma_flush()' can speculatively
 * fill random cachelines with stale data at any time, requiring an extra
 * flush post-DMA.
 *
 * Warning on the terminology - Linux calls an uncached area coherent, while
 * MIPS terminology calls memory areas with hardware-maintained coherency
 * coherent.
 *
 * Note that the R14000 and R16000 should also be checked for in this
 * condition.  However this function is only called on non-I/O-coherent
 * systems and only the R10000 and R12000 are used in such systems: the
 * SGI IP28 Indigo² and the SGI IP32 aka O2, respectively.
 */
static inline bool cpu_needs_post_dma_flush(struct device *dev)
{
	switch (boot_cpu_type()) {
	case CPU_R10000:
	case CPU_R12000:
	case CPU_BMIPS5000:
		return true;
	default:
		/*
		 * Presence of MAARs suggests that the CPU supports
		 * speculatively prefetching data, and therefore requires
		 * the post-DMA flush/invalidate.
		 */
		return cpu_has_maar;
	}
}
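
/*
 * Illustrative sketch (not part of this file): a typical driver receive
 * path on one of the CPUs above.  The buffer and length names below are
 * made up; only dma_map_single()/dma_unmap_single() are real APIs.  The
 * unmap ends up in arch_sync_dma_for_cpu() further down, which performs
 * the extra invalidate precisely because cpu_needs_post_dma_flush()
 * returned true: cachelines covering the buffer may have been refilled
 * by speculation while the device was writing to memory.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	// ... device DMAs into buf; CPU may speculatively cache it ...
 *	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 *	// buf is now safe to read from the CPU side
 */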

void arch_dma_prep_coherent(struct page *page, size_t size)
{
	dma_cache_wback_inv((unsigned long)page_address(page), size);
}
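
/*
 * Rough sketch of how the generic dma-direct allocator is expected to use
 * this hook together with uncached_kernel_address() below on kernels of
 * this vintage (simplified, not the literal core code):
 *
 *	page = alloc_pages(...);               // cached, possibly dirty
 *	arch_dma_prep_coherent(page, size);    // push out + discard those lines
 *	ret  = uncached_kernel_address(page_address(page));
 *	                                       // hand out the uncached alias
 *
 * Without the writeback+invalidate, a dirty line could later be evicted
 * and silently overwrite data the device wrote through the uncached
 * mapping.
 */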

void *uncached_kernel_address(void *addr)
{
	return (void *)(__pa(addr) + UNCAC_BASE);
}

void *cached_kernel_address(void *addr)
{
	return __va(addr) - UNCAC_BASE;
}
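
/*
 * The two helpers are inverses of each other: uncached_kernel_address()
 * rebases a direct-mapped (cached) kernel address into the uncached
 * window starting at UNCAC_BASE, and cached_kernel_address() undoes
 * that.  Illustrative check, assuming addr lies in the direct map:
 *
 *	void *uc = uncached_kernel_address(addr);
 *	WARN_ON(cached_kernel_address(uc) != addr);
 */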

static inline void dma_sync_virt(void *addr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback((unsigned long)addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv((unsigned long)addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv((unsigned long)addr, size);
		break;

	default:
		BUG();
	}
}
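
/*
 * Why each direction maps to that cache op (illustrative calls; buffer
 * and length names are made up):
 *
 *	dma_sync_virt(buf, len, DMA_TO_DEVICE);
 *		writeback: the CPU filled buf, so dirty lines must reach
 *		memory before the device reads it.
 *	dma_sync_virt(buf, len, DMA_FROM_DEVICE);
 *		invalidate: the device writes buf, so any cached copies
 *		are stale and must be discarded.
 *	dma_sync_virt(buf, len, DMA_BIDIRECTIONAL);
 *		both, since data may flow either way.
 */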

/*
 * A single sg entry may refer to multiple physically contiguous pages.  But
 * we still need to process highmem pages individually.  If highmem is not
 * configured then the bulk of this loop gets optimized out.
 */
static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned long offset = paddr & ~PAGE_MASK;
	size_t left = size;

	do {
		size_t len = left;

		if (PageHighMem(page)) {
			void *addr;

			if (offset + len > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			addr = kmap_atomic(page);
			dma_sync_virt(addr + offset, len, dir);
			kunmap_atomic(addr);
		} else {
			/* lowmem is mapped contiguously: sync the rest in one go */
			dma_sync_virt(page_address(page) + offset, left, dir);
		}
		offset = 0;
		page++;
		left -= len;
	} while (left);
}
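
/*
 * Worked example (hypothetical numbers): a 6 KiB (0x1800-byte) buffer
 * that starts 0x800 bytes into a 4 KiB highmem page spans two pages.
 * The first iteration kmaps that page and syncs PAGE_SIZE - 0x800 =
 * 0x800 bytes up to its end; the second iteration starts at offset 0
 * and syncs the remaining 0x1000 bytes.  Lowmem pages skip the kmap and
 * are handled through the direct mapping instead.
 */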

void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	dma_sync_phys(paddr, size, dir);
}

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	if (cpu_needs_post_dma_flush(dev))
		dma_sync_phys(paddr, size, dir);
}
#endif
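
/*
 * Rough call flow on kernels of this vintage (simplified sketch, not the
 * literal dma-direct code): for a non-coherent device,
 * dma_map_single()/dma_map_page() end up calling
 * arch_sync_dma_for_device() before the device touches the buffer, and
 * dma_unmap_single()/dma_sync_single_for_cpu() end up calling
 * arch_sync_dma_for_cpu() before the CPU reads it again.  On CPUs where
 * cpu_needs_post_dma_flush() is false, the second hook does nothing.
 */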

void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	dma_sync_virt(vaddr, size, direction);
}

#ifdef CONFIG_DMA_PERDEV_COHERENT
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *iommu, bool coherent)
{
	dev->dma_coherent = coherent;
}
#endif
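
/*
 * Sketch of the intent behind CONFIG_DMA_PERDEV_COHERENT: coherence is a
 * per-device property rather than a global one.  arch_setup_dma_ops()
 * records the flag (typically derived from firmware/devicetree, e.g. a
 * "dma-coherent" property) in dev->dma_coherent, and the generic
 * dma-direct code then consults dev_is_dma_coherent(dev) to skip the
 * cache maintenance hooks above for devices that are coherent.
 */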