 {
 	unsigned long prot = pgprot_val(_prot);

-	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
+	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED | _PAGE_SO;

 	return __pgprot(prot);
 }

+#define pgprot_writecombine pgprot_writecombine
+static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
+{
+	unsigned long prot = pgprot_val(_prot);
+
+	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
+
+	return __pgprot(prot);
+}
+
 /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
 #define kern_addr_valid(addr)	(1)
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do {} while (0)
-
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
 	remap_pfn_range(vma, vaddr, pfn, size, prot)
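Taken together, the two helpers above give csky drivers the usual pair of MMIO mapping attributes: pgprot_noncached() additionally sets _PAGE_SO (strongly ordered), so every access reaches the bus in program order, while pgprot_writecombine() only clears the cacheable bits and leaves stores free to merge. A minimal sketch of where a driver would typically apply them, using the io_remap_pfn_range() wrapper defined just above; my_dev_mmap() and the pgoff-as-PFN convention are illustrative assumptions, not part of this patch:

#include <linux/fs.h>
#include <linux/mm.h>

static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	/* device registers: need strict ordering -> noncached + _PAGE_SO */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* for something like a frame buffer, write-combining would usually
	 * be preferable:
	 * vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	 */

	return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				  size, vma->vm_page_prot);
}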
--- a/arch/csky/mm/dma-mapping.c
+++ b/arch/csky/mm/dma-mapping.c
 #include <linux/version.h>
 #include <asm/cache.h>
-static int __init atomic_pool_init(void)
-{
-	return dma_atomic_pool_init(GFP_KERNEL, pgprot_noncached(PAGE_KERNEL));
-}
-postcore_initcall(atomic_pool_init);
-
-void arch_dma_prep_coherent(struct page *page, size_t size)
-{
-	if (PageHighMem(page)) {
-		unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-		do {
-			void *ptr = kmap_atomic(page);
-			size_t _size = (size < PAGE_SIZE) ? size : PAGE_SIZE;
-
-			memset(ptr, 0, _size);
-			dma_wbinv_range((unsigned long)ptr,
-					(unsigned long)ptr + _size);
-
-			kunmap_atomic(ptr);
-
-			page++;
-			size -= PAGE_SIZE;
-			count--;
-		} while (count);
-	} else {
-		void *ptr = page_address(page);
-
-		memset(ptr, 0, size);
-		dma_wbinv_range((unsigned long)ptr, (unsigned long)ptr + size);
-	}
-}
-
 static inline void cache_op(phys_addr_t paddr, size_t size,
 			    void (*fn)(unsigned long start, unsigned long end))
 {
-	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
-	unsigned int offset = paddr & ~PAGE_MASK;
-	size_t left = size;
-	unsigned long start;
+	struct page *page    = phys_to_page(paddr);
+	void *start          = __va(page_to_phys(page));
+	unsigned long offset = offset_in_page(paddr);
+	size_t left          = size;

 	do {
 		size_t len = left;

+		if (offset + len > PAGE_SIZE)
+			len = PAGE_SIZE - offset;
+
 		if (PageHighMem(page)) {
-			void *addr;
+			start = kmap_atomic(page);

-			if (offset + len > PAGE_SIZE) {
-				if (offset >= PAGE_SIZE) {
-					page += offset >> PAGE_SHIFT;
-					offset &= ~PAGE_MASK;
-				}
-				len = PAGE_SIZE - offset;
-			}
+			fn((unsigned long)start + offset,
+			   (unsigned long)start + offset + len);

-			addr = kmap_atomic(page);
-			start = (unsigned long)(addr + offset);
-			fn(start, start + len);
-			kunmap_atomic(addr);
+			kunmap_atomic(start);
 		} else {
-			start = (unsigned long)phys_to_virt(paddr);
-			fn(start, start + size);
+			fn((unsigned long)start + offset,
+			   (unsigned long)start + offset + len);
 		}
 		offset = 0;
+
 		page++;
+		start += PAGE_SIZE;
 		left -= len;
 	} while (left);
 }
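The rewritten cache_op() clamps each step to the end of the current page before calling fn(), then advances page and start and restarts at offset 0, so a buffer that straddles page (or lowmem/highmem zone) boundaries is processed one page at a time; the old lowmem branch instead passed the whole size to fn() in a single call starting from phys_to_virt(paddr), which breaks once the range crosses into highmem. A standalone userspace sketch of just that arithmetic, assuming a fixed 4 KiB PAGE_SIZE, with printf standing in for fn():

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL

static void walk(unsigned long paddr, size_t size)
{
	unsigned long offset = paddr & (PAGE_SIZE - 1);	/* offset_in_page() */
	unsigned long pageno = paddr / PAGE_SIZE;
	size_t left = size;

	do {
		size_t len = left;

		if (offset + len > PAGE_SIZE)
			len = PAGE_SIZE - offset;	/* clamp to page end */

		printf("page %lu: op on [%#lx, %#lx)\n",
		       pageno, offset, offset + len);

		offset = 0;	/* subsequent pages start at their beginning */
		pageno++;
		left -= len;
	} while (left);
}

int main(void)
{
	walk(0x10000f00UL, 0x300);	/* 0x100 bytes in one page, 0x200 in the next */
	return 0;
}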
+static void dma_wbinv_set_zero_range(unsigned long start, unsigned long end)
+{
+	memset((void *)start, 0, end - start);
+	dma_wbinv_range(start, end);
+}
+
+void arch_dma_prep_coherent(struct page *page, size_t size)
+{
+	cache_op(page_to_phys(page), size, dma_wbinv_set_zero_range);
+}
+
 void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
 			   size_t size, enum dma_data_direction dir)
 {
 	switch (dir) {
 	case DMA_TO_DEVICE:
-		cache_op(paddr, size, dma_wb_range);
-		break;
+		return;
 	case DMA_FROM_DEVICE:
 	case DMA_BIDIRECTIONAL:
-		cache_op(paddr, size, dma_wbinv_range);
+		cache_op(paddr, size, dma_inv_range);
 		break;
 	default:
 		BUG();
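arch_sync_dma_for_cpu() runs when the CPU takes a streaming buffer back from the device, so for DMA_TO_DEVICE there is nothing left to do (the device only read the memory, and the CPU cache was already cleaned on the way in), and for DMA_FROM_DEVICE any cached lines are stale rather than dirty, which is why a plain invalidate via dma_inv_range() now replaces the more expensive write-back-plus-invalidate. A sketch of the standard streaming-DMA receive path that ends up in this hook; mydev_rx(), rx_buf and RX_LEN are hypothetical, while dma_map_single()/dma_unmap_single() are the generic kernel API:

#include <linux/dma-mapping.h>

#define RX_LEN	2048	/* hypothetical buffer length */

static int mydev_rx(struct device *dev, void *rx_buf)
{
	dma_addr_t dma;

	/* hand the buffer to the device; on a non-coherent arch like csky
	 * this ends in arch_sync_dma_for_device() */
	dma = dma_map_single(dev, rx_buf, RX_LEN, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... program the device with 'dma', wait for the transfer ... */

	/* take the buffer back; this ends in arch_sync_dma_for_cpu(), where
	 * DMA_FROM_DEVICE now costs only a dma_inv_range() */
	dma_unmap_single(dev, dma, RX_LEN, DMA_FROM_DEVICE);

	return 0;
}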