/*
 * Copyright (C) 2004 - 2007 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
9 #include <linux/init.h>
10 #include <linux/dma-noncoherent.h>
11 #include <linux/module.h>
12 #include <asm/cacheflush.h>
13 #include <asm/addrspace.h>
15 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
16 gfp_t gfp, unsigned long attrs)
18 void *ret, *ret_nocache;
19 int order = get_order(size);
23 ret = (void *)__get_free_pages(gfp, order);
28 * Pages from the page allocator may have data present in
29 * cache. So flush the cache before using uncached memory.
31 arch_sync_dma_for_device(dev, virt_to_phys(ret), size,
34 ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
36 free_pages((unsigned long)ret, order);
40 split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order);
42 *dma_handle = virt_to_phys(ret);
44 *dma_handle -= PFN_PHYS(dev->dma_pfn_offset);
49 void arch_dma_free(struct device *dev, size_t size, void *vaddr,
50 dma_addr_t dma_handle, unsigned long attrs)
52 int order = get_order(size);
53 unsigned long pfn = (dma_handle >> PAGE_SHIFT);
57 pfn += dev->dma_pfn_offset;
59 for (k = 0; k < (1 << order); k++)
60 __free_pages(pfn_to_page(pfn + k), 0);
65 void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
66 size_t size, enum dma_data_direction dir)
68 void *addr = sh_cacheop_vaddr(phys_to_virt(paddr));
71 case DMA_FROM_DEVICE: /* invalidate only */
72 __flush_invalidate_region(addr, size);
74 case DMA_TO_DEVICE: /* writeback only */
75 __flush_wback_region(addr, size);
77 case DMA_BIDIRECTIONAL: /* writeback and invalidate */
78 __flush_purge_region(addr, size);