dma-mapping: merge <linux/dma-contiguous.h> into <linux/dma-map-ops.h>
[linux-2.6-microblaze.git] / arch / csky / mm / dma-mapping.c
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
3
4 #include <linux/cache.h>
5 #include <linux/dma-map-ops.h>
6 #include <linux/dma-noncoherent.h>
7 #include <linux/genalloc.h>
8 #include <linux/highmem.h>
9 #include <linux/io.h>
10 #include <linux/mm.h>
11 #include <linux/scatterlist.h>
12 #include <linux/types.h>
13 #include <linux/version.h>
14 #include <asm/cache.h>
15
/*
 * Apply the cache maintenance routine @fn to the kernel virtual mapping
 * of the physical range [@paddr, @paddr + @size), one page at a time.
 *
 * Highmem pages have no permanent kernel mapping, so each one is mapped
 * with kmap_atomic() just long enough to run @fn, then unmapped again.
 * Lowmem pages are reached through the linear mapping (__va()).
 *
 * NOTE(review): the lowmem path advances @start by PAGE_SIZE each
 * iteration instead of recomputing it from the page, so a lowmem page
 * following a highmem page inside one range would see a stale pointer.
 * This appears to rely on lowmem physical addresses always preceding
 * highmem ones — TODO confirm for csky.
 */
static inline void cache_op(phys_addr_t paddr, size_t size,
                            void (*fn)(unsigned long start, unsigned long end))
{
        struct page *page    = phys_to_page(paddr);
        void *start          = __va(page_to_phys(page));
        unsigned long offset = offset_in_page(paddr);
        size_t left          = size;

        do {
                size_t len = left;

                /* Clamp the chunk so it never crosses a page boundary. */
                if (offset + len > PAGE_SIZE)
                        len = PAGE_SIZE - offset;

                if (PageHighMem(page)) {
                        /* Short-lived atomic mapping for the highmem page. */
                        start = kmap_atomic(page);

                        fn((unsigned long)start + offset,
                                        (unsigned long)start + offset + len);

                        kunmap_atomic(start);
                } else {
                        fn((unsigned long)start + offset,
                                        (unsigned long)start + offset + len);
                }
                /* Only the first page can start mid-page. */
                offset = 0;

                page++;
                start += PAGE_SIZE;
                left -= len;
        } while (left);
}
48
/*
 * Zero the virtual range [start, end), then write back and invalidate
 * the cache lines covering it so the zeroes reach memory and no stale
 * lines remain in the cache.
 */
static void dma_wbinv_set_zero_range(unsigned long start, unsigned long end)
{
        size_t len = end - start;

        memset((void *)start, 0, len);
        dma_wbinv_range(start, end);
}
54
/*
 * Prepare a page range for use as a coherent DMA buffer: zero it and
 * write back + invalidate the covering cache lines, so no dirty lines
 * can later be evicted on top of device-written data.
 */
void arch_dma_prep_coherent(struct page *page, size_t size)
{
        cache_op(page_to_phys(page), size, dma_wbinv_set_zero_range);
}
59
60 void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
61                 enum dma_data_direction dir)
62 {
63         switch (dir) {
64         case DMA_TO_DEVICE:
65                 cache_op(paddr, size, dma_wb_range);
66                 break;
67         case DMA_FROM_DEVICE:
68         case DMA_BIDIRECTIONAL:
69                 cache_op(paddr, size, dma_wbinv_range);
70                 break;
71         default:
72                 BUG();
73         }
74 }
75
76 void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
77                 enum dma_data_direction dir)
78 {
79         switch (dir) {
80         case DMA_TO_DEVICE:
81                 return;
82         case DMA_FROM_DEVICE:
83         case DMA_BIDIRECTIONAL:
84                 cache_op(paddr, size, dma_inv_range);
85                 break;
86         default:
87                 BUG();
88         }
89 }