csky: Move static keyword to the front of declaration
[linux-2.6-microblaze.git] arch/csky/mm/dma-mapping.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-noncoherent.h>
#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
#include <linux/version.h>
#include <asm/cache.h>

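/*
 * Set up an uncached pool at boot so coherent allocations made from
 * atomic context can be served without blocking.
 */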
static int __init atomic_pool_init(void)
{
        return dma_atomic_pool_init(GFP_KERNEL, pgprot_noncached(PAGE_KERNEL));
}
postcore_initcall(atomic_pool_init);

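/*
 * Apply a cache maintenance callback to [paddr, paddr + size), one
 * page at a time. Highmem pages have no permanent kernel mapping, so
 * they are mapped temporarily with kmap_atomic() for the operation.
 */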
static inline void cache_op(phys_addr_t paddr, size_t size,
                            void (*fn)(unsigned long start, unsigned long end))
{
        struct page *page    = phys_to_page(paddr);
        void *start          = __va(page_to_phys(page));
        unsigned long offset = offset_in_page(paddr);
        size_t left          = size;

        do {
                size_t len = left;

                /* Clamp the chunk so it never crosses a page boundary. */
                if (offset + len > PAGE_SIZE)
                        len = PAGE_SIZE - offset;

                if (PageHighMem(page)) {
                        start = kmap_atomic(page);

                        fn((unsigned long)start + offset,
                                        (unsigned long)start + offset + len);

                        kunmap_atomic(start);
                } else {
                        fn((unsigned long)start + offset,
                                        (unsigned long)start + offset + len);
                }
                /* Only the first page can begin at a non-zero offset. */
                offset = 0;

                page++;
                start += PAGE_SIZE;
                left -= len;
        } while (left);
}

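/* Zero the range, then write it back and invalidate it in the cache. */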
static void dma_wbinv_set_zero_range(unsigned long start, unsigned long end)
{
        memset((void *)start, 0, end - start);
        dma_wbinv_range(start, end);
}

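/*
 * Prepare a page for use as a coherent DMA buffer: clear it and flush
 * it out of the cache so the uncached mapping sees consistent data.
 */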
void arch_dma_prep_coherent(struct page *page, size_t size)
{
        cache_op(page_to_phys(page), size, dma_wbinv_set_zero_range);
}

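/*
 * Make CPU writes visible to the device: write back dirty lines for
 * DMA_TO_DEVICE, and additionally invalidate them when the device may
 * also write to the buffer.
 */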
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
                              size_t size, enum dma_data_direction dir)
{
        switch (dir) {
        case DMA_TO_DEVICE:
                cache_op(paddr, size, dma_wb_range);
                break;
        case DMA_FROM_DEVICE:
        case DMA_BIDIRECTIONAL:
                cache_op(paddr, size, dma_wbinv_range);
                break;
        default:
                BUG();
        }
}

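/*
 * Make device writes visible to the CPU by invalidating any stale
 * cache lines. Nothing to do for DMA_TO_DEVICE, since the buffer was
 * already cleaned before the transfer started.
 */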
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
                           size_t size, enum dma_data_direction dir)
{
        switch (dir) {
        case DMA_TO_DEVICE:
                return;
        case DMA_FROM_DEVICE:
        case DMA_BIDIRECTIONAL:
                cache_op(paddr, size, dma_inv_range);
                break;
        default:
                BUG();
        }
}