arch/arm64/mm/dma-mapping.c
/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/acpi.h>
#include <linux/memblock.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
#include <linux/pci.h>

#include <asm/cacheflush.h>

static struct gen_pool *atomic_pool __ro_after_init;

#define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
        atomic_pool_size = memparse(p, &p);
        return 0;
}
early_param("coherent_pool", early_coherent_pool);

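/*
 * Allocate a zeroed buffer from the atomic pool. This serves callers
 * that cannot sleep: the pool is populated and remapped non-cacheable
 * up front (see atomic_pool_init() below), so no page allocation or
 * remapping is needed at alloc time.
 */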
static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
        unsigned long val;
        void *ptr = NULL;

        if (!atomic_pool) {
                WARN(1, "coherent pool not initialised!\n");
                return NULL;
        }

        val = gen_pool_alloc(atomic_pool, size);
        if (val) {
                phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

                *ret_page = phys_to_page(phys);
                ptr = (void *)val;
                memset(ptr, 0, size);
        }

        return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
        return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
        if (!__in_atomic_pool(start, size))
                return 0;

        gen_pool_free(atomic_pool, (unsigned long)start, size);

        return 1;
}

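/*
 * Allocate coherent memory for the non-IOMMU (direct/SWIOTLB) path:
 * atomic callers are served from the atomic pool, while blocking
 * callers get pages from the direct allocator, which are flushed from
 * the cacheable kernel alias and remapped write-combine for the
 * returned CPU address.
 */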
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t flags, unsigned long attrs)
{
        struct page *page;
        void *ptr, *coherent_ptr;
        pgprot_t prot = pgprot_writecombine(PAGE_KERNEL);

        size = PAGE_ALIGN(size);

        if (!gfpflags_allow_blocking(flags)) {
                struct page *page = NULL;
                void *addr = __alloc_from_pool(size, &page, flags);

                if (addr)
                        *dma_handle = phys_to_dma(dev, page_to_phys(page));

                return addr;
        }

        ptr = dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs);
        if (!ptr)
                goto no_mem;

        /* remove any dirty cache lines on the kernel alias */
        __dma_flush_area(ptr, size);

        /* create a coherent mapping */
        page = virt_to_page(ptr);
        coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
                                                   prot, __builtin_return_address(0));
        if (!coherent_ptr)
                goto no_map;

        return coherent_ptr;

no_map:
        dma_direct_free_pages(dev, size, ptr, *dma_handle, attrs);
no_mem:
        return NULL;
}

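/*
 * Free counterpart of arch_dma_alloc(): pool buffers go back to the
 * atomic pool; otherwise the write-combine alias is vunmap()ed and the
 * backing pages, whose linear-map address is recovered from the DMA
 * handle, are returned to the direct allocator.
 */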
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
                dma_addr_t dma_handle, unsigned long attrs)
{
        if (!__free_from_pool(vaddr, PAGE_ALIGN(size))) {
                void *kaddr = phys_to_virt(dma_to_phys(dev, dma_handle));

                vunmap(vaddr);
                dma_direct_free_pages(dev, size, kaddr, dma_handle, attrs);
        }
}

long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
                dma_addr_t dma_addr)
{
        return __phys_to_pfn(dma_to_phys(dev, dma_addr));
}

pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
                unsigned long attrs)
{
        if (!dev_is_dma_coherent(dev) || (attrs & DMA_ATTR_WRITE_COMBINE))
                return pgprot_writecombine(prot);
        return prot;
}

void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir)
{
        __dma_map_area(phys_to_virt(paddr), size, dir);
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir)
{
        __dma_unmap_area(phys_to_virt(paddr), size, dir);
}

#ifdef CONFIG_IOMMU_DMA
static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
                                      struct page *page, size_t size)
{
        int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

        if (!ret)
                sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);

        return ret;
}

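/*
 * Helper for mmap() of a physically contiguous buffer: vm_pgoff is
 * treated as an offset into the buffer, and VMAs that would run past
 * the end of the allocation are rejected with -ENXIO.
 */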
static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
                              unsigned long pfn, size_t size)
{
        int ret = -ENXIO;
        unsigned long nr_vma_pages = vma_pages(vma);
        unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long off = vma->vm_pgoff;

        if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
                ret = remap_pfn_range(vma, vma->vm_start,
                                      pfn + off,
                                      vma->vm_end - vma->vm_start,
                                      vma->vm_page_prot);
        }

        return ret;
}
#endif /* CONFIG_IOMMU_DMA */

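/*
 * Create the atomic pool at arch_initcall time: the backing pages come
 * from CMA when a default area exists (alloc_pages(GFP_DMA32)
 * otherwise), are zeroed and flushed from the cacheable alias, then
 * remapped Normal-NC and handed to genalloc with page granularity.
 */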
static int __init atomic_pool_init(void)
{
        pgprot_t prot = __pgprot(PROT_NORMAL_NC);
        unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
        struct page *page;
        void *addr;
        unsigned int pool_size_order = get_order(atomic_pool_size);

        if (dev_get_cma_area(NULL))
                page = dma_alloc_from_contiguous(NULL, nr_pages,
                                                 pool_size_order, false);
        else
                page = alloc_pages(GFP_DMA32, pool_size_order);

        if (page) {
                int ret;
                void *page_addr = page_address(page);

                memset(page_addr, 0, atomic_pool_size);
                __dma_flush_area(page_addr, atomic_pool_size);

                atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
                if (!atomic_pool)
                        goto free_page;

                addr = dma_common_contiguous_remap(page, atomic_pool_size,
                                        VM_USERMAP, prot, atomic_pool_init);

                if (!addr)
                        goto destroy_genpool;

                ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
                                        page_to_phys(page),
                                        atomic_pool_size, -1);
                if (ret)
                        goto remove_mapping;

                gen_pool_set_algo(atomic_pool,
                                  gen_pool_first_fit_order_align,
                                  NULL);

                pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
                        atomic_pool_size / 1024);
                return 0;
        }
        goto out;

remove_mapping:
        dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
        gen_pool_destroy(atomic_pool);
        atomic_pool = NULL;
free_page:
        if (!dma_release_from_contiguous(NULL, page, nr_pages))
                __free_pages(page, pool_size_order);
out:
        pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
                atomic_pool_size / 1024);
        return -ENOMEM;
}

/********************************************
 * The following APIs are for dummy DMA ops *
 ********************************************/

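/*
 * Every operation below fails or does nothing: these ops serve as a
 * safe default for devices that never have real DMA ops configured,
 * so that stray DMA API calls fail cleanly rather than corrupt memory.
 */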
static void *__dummy_alloc(struct device *dev, size_t size,
                           dma_addr_t *dma_handle, gfp_t flags,
                           unsigned long attrs)
{
        return NULL;
}

static void __dummy_free(struct device *dev, size_t size,
                         void *vaddr, dma_addr_t dma_handle,
                         unsigned long attrs)
{
}

static int __dummy_mmap(struct device *dev,
                        struct vm_area_struct *vma,
                        void *cpu_addr, dma_addr_t dma_addr, size_t size,
                        unsigned long attrs)
{
        return -ENXIO;
}

static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
                                   unsigned long attrs)
{
        return 0;
}

static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
                               size_t size, enum dma_data_direction dir,
                               unsigned long attrs)
{
}

static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
                          int nelems, enum dma_data_direction dir,
                          unsigned long attrs)
{
        return 0;
}

static void __dummy_unmap_sg(struct device *dev,
                             struct scatterlist *sgl, int nelems,
                             enum dma_data_direction dir,
                             unsigned long attrs)
{
}

static void __dummy_sync_single(struct device *dev,
                                dma_addr_t dev_addr, size_t size,
                                enum dma_data_direction dir)
{
}

static void __dummy_sync_sg(struct device *dev,
                            struct scatterlist *sgl, int nelems,
                            enum dma_data_direction dir)
{
}

static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
        return 1;
}

static int __dummy_dma_supported(struct device *hwdev, u64 mask)
{
        return 0;
}

const struct dma_map_ops dummy_dma_ops = {
        .alloc                  = __dummy_alloc,
        .free                   = __dummy_free,
        .mmap                   = __dummy_mmap,
        .map_page               = __dummy_map_page,
        .unmap_page             = __dummy_unmap_page,
        .map_sg                 = __dummy_map_sg,
        .unmap_sg               = __dummy_unmap_sg,
        .sync_single_for_cpu    = __dummy_sync_single,
        .sync_single_for_device = __dummy_sync_single,
        .sync_sg_for_cpu        = __dummy_sync_sg,
        .sync_sg_for_device     = __dummy_sync_sg,
        .mapping_error          = __dummy_mapping_error,
        .dma_supported          = __dummy_dma_supported,
};
EXPORT_SYMBOL(dummy_dma_ops);

static int __init arm64_dma_init(void)
{
        WARN_TAINT(ARCH_DMA_MINALIGN < cache_line_size(),
                   TAINT_CPU_OUT_OF_SPEC,
                   "ARCH_DMA_MINALIGN smaller than CTR_EL0.CWG (%d < %d)",
                   ARCH_DMA_MINALIGN, cache_line_size());

        return atomic_pool_init();
}
arch_initcall(arm64_dma_init);

#ifdef CONFIG_IOMMU_DMA
#include <linux/dma-iommu.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>

/* Thankfully, all cache ops are by VA so we can ignore phys here */
static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
{
        __dma_flush_area(virt, PAGE_SIZE);
}

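/*
 * IOMMU-backed coherent allocation, with three strategies:
 *  - atomic context: a physically contiguous buffer (lowmem pages for
 *    coherent devices, the atomic pool otherwise), mapped in one go;
 *  - DMA_ATTR_FORCE_CONTIGUOUS: a CMA allocation, remapped with the
 *    requested attributes;
 *  - otherwise: scattered pages from iommu_dma_alloc(), contiguous in
 *    IOVA space and vmap()ed for the CPU.
 */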
static void *__iommu_alloc_attrs(struct device *dev, size_t size,
                                 dma_addr_t *handle, gfp_t gfp,
                                 unsigned long attrs)
{
        bool coherent = dev_is_dma_coherent(dev);
        int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
        size_t iosize = size;
        void *addr;

        if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
                return NULL;

        size = PAGE_ALIGN(size);

        /*
         * Some drivers rely on this, and we probably don't want the
         * possibility of stale kernel data being read by devices anyway.
         */
        gfp |= __GFP_ZERO;

        if (!gfpflags_allow_blocking(gfp)) {
                struct page *page;
                /*
                 * In atomic context we can't remap anything, so we'll only
                 * get the virtually contiguous buffer we need by way of a
                 * physically contiguous allocation.
                 */
                if (coherent) {
                        page = alloc_pages(gfp, get_order(size));
                        addr = page ? page_address(page) : NULL;
                } else {
                        addr = __alloc_from_pool(size, &page, gfp);
                }
                if (!addr)
                        return NULL;

                *handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
                if (iommu_dma_mapping_error(dev, *handle)) {
                        if (coherent)
                                __free_pages(page, get_order(size));
                        else
                                __free_from_pool(addr, size);
                        addr = NULL;
                }
        } else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
                pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
                struct page *page;

                page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
                                        get_order(size), gfp & __GFP_NOWARN);
                if (!page)
                        return NULL;

                *handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
                if (iommu_dma_mapping_error(dev, *handle)) {
                        dma_release_from_contiguous(dev, page,
                                                    size >> PAGE_SHIFT);
                        return NULL;
                }
                addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
                                                   prot,
                                                   __builtin_return_address(0));
                if (addr) {
                        memset(addr, 0, size);
                        if (!coherent)
                                __dma_flush_area(page_to_virt(page), iosize);
                } else {
                        iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
                        dma_release_from_contiguous(dev, page,
                                                    size >> PAGE_SHIFT);
                }
        } else {
                pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
                struct page **pages;

                pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
                                        handle, flush_page);
                if (!pages)
                        return NULL;

                addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
                                              __builtin_return_address(0));
                if (!addr)
                        iommu_dma_free(dev, pages, iosize, handle);
        }
        return addr;
}

static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
                               dma_addr_t handle, unsigned long attrs)
{
        size_t iosize = size;

        size = PAGE_ALIGN(size);
        /*
         * @cpu_addr will be one of 4 things depending on how it was allocated:
         * - A remapped array of pages for contiguous allocations.
         * - A remapped array of pages from iommu_dma_alloc(), for all
         *   non-atomic allocations.
         * - A non-cacheable alias from the atomic pool, for atomic
         *   allocations by non-coherent devices.
         * - A normal lowmem address, for atomic allocations by
         *   coherent devices.
         * Hence how dodgy the below logic looks...
         */
        if (__in_atomic_pool(cpu_addr, size)) {
                iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
                __free_from_pool(cpu_addr, size);
        } else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
                struct page *page = vmalloc_to_page(cpu_addr);

                iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
                dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
                dma_common_free_remap(cpu_addr, size, VM_USERMAP);
        } else if (is_vmalloc_addr(cpu_addr)) {
                struct vm_struct *area = find_vm_area(cpu_addr);

                if (WARN_ON(!area || !area->pages))
                        return;
                iommu_dma_free(dev, area->pages, iosize, &handle);
                dma_common_free_remap(cpu_addr, size, VM_USERMAP);
        } else {
                iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
                __free_pages(virt_to_page(cpu_addr), get_order(size));
        }
}

static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
                              void *cpu_addr, dma_addr_t dma_addr, size_t size,
                              unsigned long attrs)
{
        struct vm_struct *area;
        int ret;

        vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);

        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
                /*
                 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
                 * hence in the vmalloc space.
                 */
                unsigned long pfn = vmalloc_to_pfn(cpu_addr);
                return __swiotlb_mmap_pfn(vma, pfn, size);
        }

        area = find_vm_area(cpu_addr);
        if (WARN_ON(!area || !area->pages))
                return -ENXIO;

        return iommu_dma_mmap(area->pages, size, vma);
}

static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
                               void *cpu_addr, dma_addr_t dma_addr,
                               size_t size, unsigned long attrs)
{
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct vm_struct *area = find_vm_area(cpu_addr);

        if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
                /*
                 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
                 * hence in the vmalloc space.
                 */
                struct page *page = vmalloc_to_page(cpu_addr);
                return __swiotlb_get_sgtable_page(sgt, page, size);
        }

        if (WARN_ON(!area || !area->pages))
                return -ENXIO;

        return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
                                         GFP_KERNEL);
}

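/*
 * Sync hooks for streaming DMA: coherent devices need no cache
 * maintenance, while for non-coherent ones the CPU address to operate
 * on is recovered by walking the IOMMU page tables back from the IOVA.
 */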
static void __iommu_sync_single_for_cpu(struct device *dev,
                                        dma_addr_t dev_addr, size_t size,
                                        enum dma_data_direction dir)
{
        phys_addr_t phys;

        if (dev_is_dma_coherent(dev))
                return;

        phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dev_addr);
        arch_sync_dma_for_cpu(dev, phys, size, dir);
}

static void __iommu_sync_single_for_device(struct device *dev,
                                           dma_addr_t dev_addr, size_t size,
                                           enum dma_data_direction dir)
{
        phys_addr_t phys;

        if (dev_is_dma_coherent(dev))
                return;

        phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dev_addr);
        arch_sync_dma_for_device(dev, phys, size, dir);
}

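/*
 * Streaming map/unmap: on top of the IOVA management done by the
 * dma-iommu layer, non-coherent devices get the cache maintenance
 * appropriate to the transfer direction, unless the caller opted out
 * with DMA_ATTR_SKIP_CPU_SYNC.
 */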
static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
                                   unsigned long attrs)
{
        bool coherent = dev_is_dma_coherent(dev);
        int prot = dma_info_to_prot(dir, coherent, attrs);
        dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);

        if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
            !iommu_dma_mapping_error(dev, dev_addr))
                __dma_map_area(page_address(page) + offset, size, dir);

        return dev_addr;
}

static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
                               size_t size, enum dma_data_direction dir,
                               unsigned long attrs)
{
        if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __iommu_sync_single_for_cpu(dev, dev_addr, size, dir);

        iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
}

static void __iommu_sync_sg_for_cpu(struct device *dev,
                                    struct scatterlist *sgl, int nelems,
                                    enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (dev_is_dma_coherent(dev))
                return;

        for_each_sg(sgl, sg, nelems, i)
                arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
}

static void __iommu_sync_sg_for_device(struct device *dev,
                                       struct scatterlist *sgl, int nelems,
                                       enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (dev_is_dma_coherent(dev))
                return;

        for_each_sg(sgl, sg, nelems, i)
                arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
}

static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
                                int nelems, enum dma_data_direction dir,
                                unsigned long attrs)
{
        bool coherent = dev_is_dma_coherent(dev);

        if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __iommu_sync_sg_for_device(dev, sgl, nelems, dir);

        return iommu_dma_map_sg(dev, sgl, nelems,
                                dma_info_to_prot(dir, coherent, attrs));
}

static void __iommu_unmap_sg_attrs(struct device *dev,
                                   struct scatterlist *sgl, int nelems,
                                   enum dma_data_direction dir,
                                   unsigned long attrs)
{
        if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);

        iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
}

static const struct dma_map_ops iommu_dma_ops = {
        .alloc = __iommu_alloc_attrs,
        .free = __iommu_free_attrs,
        .mmap = __iommu_mmap_attrs,
        .get_sgtable = __iommu_get_sgtable,
        .map_page = __iommu_map_page,
        .unmap_page = __iommu_unmap_page,
        .map_sg = __iommu_map_sg_attrs,
        .unmap_sg = __iommu_unmap_sg_attrs,
        .sync_single_for_cpu = __iommu_sync_single_for_cpu,
        .sync_single_for_device = __iommu_sync_single_for_device,
        .sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
        .sync_sg_for_device = __iommu_sync_sg_for_device,
        .map_resource = iommu_dma_map_resource,
        .unmap_resource = iommu_dma_unmap_resource,
        .mapping_error = iommu_dma_mapping_error,
};

static int __init __iommu_dma_init(void)
{
        return iommu_dma_init();
}
arch_initcall(__iommu_dma_init);

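/*
 * Switch a device over to iommu_dma_ops if the IOMMU core gave its
 * group a default DMA domain; devices in other domain types (e.g.
 * passthrough) keep the platform SWIOTLB-based ops.
 */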
static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                                  const struct iommu_ops *ops)
{
        struct iommu_domain *domain;

        if (!ops)
                return;

        /*
         * The IOMMU core code allocates the default DMA domain, which the
         * underlying IOMMU driver needs to support via the dma-iommu layer.
         */
        domain = iommu_get_domain_for_dev(dev);

        if (!domain)
                goto out_err;

        if (domain->type == IOMMU_DOMAIN_DMA) {
                if (iommu_dma_init_domain(domain, dma_base, size, dev))
                        goto out_err;

                dev->dma_ops = &iommu_dma_ops;
        }

        return;

out_err:
        pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
                dev_name(dev));
}

void arch_teardown_dma_ops(struct device *dev)
{
        dev->dma_ops = NULL;
}

#else

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                                  const struct iommu_ops *iommu)
{ }

#endif  /* CONFIG_IOMMU_DMA */

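/*
 * Called once a device's DMA configuration is known (typically from
 * the DT or ACPI/IORT configuration path): default to swiotlb_dma_ops,
 * let the IOMMU path override that where a DMA domain exists, and
 * layer the Xen ops on top when running as the initial domain.
 */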
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                        const struct iommu_ops *iommu, bool coherent)
{
        if (!dev->dma_ops)
                dev->dma_ops = &swiotlb_dma_ops;

        dev->dma_coherent = coherent;
        __iommu_setup_dma_ops(dev, dma_base, size, iommu);

#ifdef CONFIG_XEN
        if (xen_initial_domain()) {
                dev->archdata.dev_dma_ops = dev->dma_ops;
                dev->dma_ops = xen_dma_ops;
        }
#endif
}