arch/microblaze/kernel/dma.c
/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <linux/bug.h>

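/*
 * DMA is treated as non-coherent here: with NOT_COHERENT_CACHE defined,
 * coherent buffers come from the uncached consistent_alloc() pool and
 * streaming mappings are synchronized explicitly with __dma_sync().
 */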
#define NOT_COHERENT_CACHE

static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, gfp_t flag,
                                       unsigned long attrs)
{
#ifdef NOT_COHERENT_CACHE
        return consistent_alloc(flag, size, dma_handle);
#else
        void *ret;
        struct page *page;
        int node = dev_to_node(dev);

        /* ignore region specifiers */
        flag &= ~(__GFP_HIGHMEM);

        page = alloc_pages_node(node, flag, get_order(size));
        if (page == NULL)
                return NULL;
        ret = page_address(page);
        memset(ret, 0, size);
        *dma_handle = virt_to_phys(ret);

        return ret;
#endif
}

static void dma_direct_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t dma_handle,
                                     unsigned long attrs)
{
#ifdef NOT_COHERENT_CACHE
        consistent_free(size, vaddr);
#else
        free_pages((unsigned long)vaddr, get_order(size));
#endif
}

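/*
 * Map a scatter-gather list for streaming DMA. Each segment's DMA address
 * is simply its physical address; the CPU cache is synchronized for every
 * segment unless the caller passed DMA_ATTR_SKIP_CPU_SYNC.
 */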
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
                             int nents, enum dma_data_direction direction,
                             unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        /* FIXME: this part of the code is untested */
        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = sg_phys(sg);

                if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
                        continue;

                __dma_sync(sg_phys(sg), sg->length, direction);
        }

        return nents;
}

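/*
 * With a direct mapping there is no addressing restriction to enforce,
 * so any DMA mask the device requests is accepted.
 */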
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
        return 1;
}

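/*
 * Map a single page for streaming DMA: the DMA address is the page's
 * physical address plus the offset. The cache is synchronized first
 * unless DMA_ATTR_SKIP_CPU_SYNC is set.
 */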
static inline dma_addr_t dma_direct_map_page(struct device *dev,
                                             struct page *page,
                                             unsigned long offset,
                                             size_t size,
                                             enum dma_data_direction direction,
                                             unsigned long attrs)
{
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                __dma_sync(page_to_phys(page) + offset, size, direction);
        return page_to_phys(page) + offset;
}

static inline void dma_direct_unmap_page(struct device *dev,
                                         dma_addr_t dma_address,
                                         size_t size,
                                         enum dma_data_direction direction,
                                         unsigned long attrs)
{
        /*
         * No additional cache cleanup is necessary here.
         *
         * dma_address is already a physical address, so it can be passed
         * to __dma_sync() directly, without a phys_to_virt() conversion.
         */
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                __dma_sync(dma_address, size, direction);
}

static inline void
dma_direct_sync_single_for_cpu(struct device *dev,
                               dma_addr_t dma_handle, size_t size,
                               enum dma_data_direction direction)
{
        /*
         * There is nothing to write back when the buffer is handed to the
         * CPU; the cache only needs to be synchronized if the device may
         * have written to the buffer (DMA_FROM_DEVICE).
         */

        if (direction == DMA_FROM_DEVICE)
                __dma_sync(dma_handle, size, direction);
}

static inline void
dma_direct_sync_single_for_device(struct device *dev,
                                  dma_addr_t dma_handle, size_t size,
                                  enum dma_data_direction direction)
{
        /*
         * It's pointless to invalidate the cache if the device isn't
         * supposed to write to the relevant region
         */

        if (direction == DMA_TO_DEVICE)
                __dma_sync(dma_handle, size, direction);
}

static inline void
dma_direct_sync_sg_for_cpu(struct device *dev,
                           struct scatterlist *sgl, int nents,
                           enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        /* FIXME: this part of the code is untested */
        if (direction == DMA_FROM_DEVICE)
                for_each_sg(sgl, sg, nents, i)
                        __dma_sync(sg->dma_address, sg->length, direction);
}

static inline void
dma_direct_sync_sg_for_device(struct device *dev,
                              struct scatterlist *sgl, int nents,
                              enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        /* FIXME: this part of the code is untested */
        if (direction == DMA_TO_DEVICE)
                for_each_sg(sgl, sg, nents, i)
                        __dma_sync(sg->dma_address, sg->length, direction);
}

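/*
 * Map a coherent buffer into user space. On MMU configurations the
 * buffer's page frames are remapped into the vma; with a non-coherent
 * cache the user mapping is made uncached so user space and the device
 * see the same data.
 */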
static
int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
                             void *cpu_addr, dma_addr_t handle, size_t size,
                             unsigned long attrs)
{
#ifdef CONFIG_MMU
        unsigned long user_count = vma_pages(vma);
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long off = vma->vm_pgoff;
        unsigned long pfn;

        if (off >= count || user_count > (count - off))
                return -ENXIO;

#ifdef NOT_COHERENT_CACHE
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        pfn = consistent_virt_to_pfn(cpu_addr);
#else
        pfn = virt_to_pfn(cpu_addr);
#endif
        return remap_pfn_range(vma, vma->vm_start, pfn + off,
                               vma->vm_end - vma->vm_start, vma->vm_page_prot);
#else
        return -ENXIO;
#endif
}

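/*
 * DMA mapping operations used by the generic DMA API (dma_alloc_coherent(),
 * dma_map_single(), dma_map_sg(), ...) on MicroBlaze.
 */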
const struct dma_map_ops dma_direct_ops = {
        .alloc          = dma_direct_alloc_coherent,
        .free           = dma_direct_free_coherent,
        .mmap           = dma_direct_mmap_coherent,
        .map_sg         = dma_direct_map_sg,
        .dma_supported  = dma_direct_dma_supported,
        .map_page       = dma_direct_map_page,
        .unmap_page     = dma_direct_unmap_page,
        .sync_single_for_cpu            = dma_direct_sync_single_for_cpu,
        .sync_single_for_device         = dma_direct_sync_single_for_device,
        .sync_sg_for_cpu                = dma_direct_sync_sg_for_cpu,
        .sync_sg_for_device             = dma_direct_sync_sg_for_device,
};
EXPORT_SYMBOL(dma_direct_ops);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

        return 0;
}
fs_initcall(dma_init);
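
/*
 * Example (illustrative only, not part of the original file): how a driver
 * typically reaches these callbacks through the generic DMA API. The
 * "dev", "buf", "len" and "size" names below are placeholders.
 *
 *      dma_addr_t handle;
 *      void *vaddr;
 *
 *      vaddr = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
 *                                      -> dma_direct_alloc_coherent()
 *      handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *                                      -> dma_direct_map_page()
 *      dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 *                                      -> dma_direct_unmap_page()
 *      dma_free_coherent(dev, size, vaddr, handle);
 *                                      -> dma_direct_free_coherent()
 */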