// SPDX-License-Identifier: GPL-2.0
/*
** PARISC 1.1 Dynamic DMA mapping support.
** This implementation is for PA-RISC platforms that do not support
** I/O TLBs (aka DMA address translation hardware).
** See Documentation/DMA-API-HOWTO.txt for interface definitions.
**
**      (c) Copyright 1999,2000 Hewlett-Packard Company
**      (c) Copyright 2000 Grant Grundler
**      (c) Copyright 2000 Philipp Rumpf <prumpf@tux.org>
**      (c) Copyright 2000 John Marvin
**
** "leveraged" from 2.3.47: arch/ia64/kernel/pci-dma.c.
** (I assume it's from David Mosberger-Tang but there was no Copyright)
**
** AFAIK, all PA7100LC and PA7300LC platforms can use this code.
**
** - ggg
*/

#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/export.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/dma.h>	/* for DMA_CHUNK_SIZE */
#include <asm/io.h>
#include <asm/page.h>	/* get_order */
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>	/* for purge_tlb_*() macros */

static struct proc_dir_entry *proc_gsc_root __read_mostly;
static unsigned long pcxl_used_bytes __read_mostly;
static unsigned long pcxl_used_pages __read_mostly;

extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */
static DEFINE_SPINLOCK(pcxl_res_lock);
static char	*pcxl_res_map;
static int	pcxl_res_hint;
static int	pcxl_res_size;

#ifdef DEBUG_PCXL_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif

/*
** Dump a hex representation of the resource map.
*/

#ifdef DUMP_RESMAP
static void dump_resmap(void)
{
	u_long *res_ptr = (unsigned long *)pcxl_res_map;
	u_long i = 0;

	printk("res_map: ");
	for(; i < (pcxl_res_size / sizeof(unsigned long)); ++i, ++res_ptr)
		printk("%08lx ", *res_ptr);

	printk("\n");
}
#else
static inline void dump_resmap(void) { }
#endif

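/*
** Helpers that hand-build kernel page table entries for the pcxl DMA
** region.  map_uncached_pages() walks pgd -> pmd -> pte over [vaddr,
** vaddr + size) and points each PTE at the next physical page of the
** buffer with PAGE_KERNEL_UNC, so accesses through this alias bypass
** the data cache.  Each set_pte() is paired with a purge of any stale
** kernel TLB entry under purge_tlb_start()/purge_tlb_end().
*/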
static inline int map_pte_uncached(pte_t *pte,
		unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		unsigned long flags;

		if (!pte_none(*pte))
			printk(KERN_ERR "map_pte_uncached: page already exists\n");
		purge_tlb_start(flags);
		set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
		pdtlb_kernel(orig_vaddr);
		purge_tlb_end(flags);
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		(*paddr_ptr) += PAGE_SIZE;
		pte++;
	} while (vaddr < end);
	return 0;
}

static inline int map_pmd_uncached(pmd_t *pmd, unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		pte_t *pte = pte_alloc_kernel(pmd, vaddr);
		if (!pte)
			return -ENOMEM;
		if (map_pte_uncached(pte, orig_vaddr, end - vaddr, paddr_ptr))
			return -ENOMEM;
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
	return 0;
}

static inline int map_uncached_pages(unsigned long vaddr, unsigned long size,
		unsigned long paddr)
{
	pgd_t *dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		pmd_t *pmd;

		pmd = pmd_alloc(NULL, dir, vaddr);
		if (!pmd)
			return -ENOMEM;
		if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
			return -ENOMEM;
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
	return 0;
}

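/*
** The teardown path mirrors the walk above: unmap_uncached_pages()
** clears each PTE in [vaddr, vaddr + size) and purges the matching
** kernel TLB entry, leaving that slice of the pcxl virtual window
** free for reuse.
*/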
static inline void unmap_uncached_pte(pmd_t *pmd, unsigned long vaddr,
		unsigned long size)
{
	pte_t *pte;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset_map(pmd, vaddr);
	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		unsigned long flags;
		pte_t page = *pte;

		pte_clear(&init_mm, vaddr, pte);
		purge_tlb_start(flags);
		pdtlb_kernel(orig_vaddr);
		purge_tlb_end(flags);
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		pte++;
		if (pte_none(page) || pte_present(page))
			continue;
		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
	} while (vaddr < end);
}

static inline void unmap_uncached_pmd(pgd_t *dir, unsigned long vaddr,
		unsigned long size)
{
	pmd_t *pmd;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, vaddr);
	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		unmap_uncached_pte(pmd, orig_vaddr, end - vaddr);
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
}

static void unmap_uncached_pages(unsigned long vaddr, unsigned long size)
{
	pgd_t *dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		unmap_uncached_pmd(dir, vaddr, end - vaddr);
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
}

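/*
** pcxl_res_map is a bitmap over the PCXL_DMA_MAP_SIZE virtual window:
** one bit per page, so one map byte covers 8 pages.  An allocation of
** n pages claims n consecutive bits by OR-ing an n-bit mask into the
** map.  The search scans the map in u8/u16/u32 chunks (chosen by
** allocation size), starting at pcxl_res_hint and wrapping back to
** the start of the map if nothing is found past the hint.
*/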
#define PCXL_SEARCH_LOOP(idx, mask, size)  \
       for(; res_ptr < res_end; ++res_ptr) \
       { \
               if(0 == ((*res_ptr) & mask)) { \
                       *res_ptr |= mask; \
                       idx = (int)((u_long)res_ptr - (u_long)pcxl_res_map); \
                       pcxl_res_hint = idx + (size >> 3); \
                       goto resource_found; \
               } \
       }

#define PCXL_FIND_FREE_MAPPING(idx, mask, size)  { \
       u##size *res_ptr = (u##size *)&(pcxl_res_map[pcxl_res_hint & ~((size >> 3) - 1)]); \
       u##size *res_end = (u##size *)&pcxl_res_map[pcxl_res_size]; \
       PCXL_SEARCH_LOOP(idx, mask, size); \
       res_ptr = (u##size *)&pcxl_res_map[0]; \
       PCXL_SEARCH_LOOP(idx, mask, size); \
}

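/*
** Worked example: a 3-page request gives pages_needed = 3 and
** mask = ~0UL >> (BITS_PER_LONG - 3) = 0x7, so the u8 search looks
** for a byte whose low three bits are clear.  The returned address
** is pcxl_dma_start + res_idx * 8 * PAGE_SIZE, since res_idx is a
** byte offset and each map byte represents 8 pages.
*/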
unsigned long
pcxl_alloc_range(size_t size)
{
	int res_idx;
	u_long mask, flags;
	unsigned int pages_needed = size >> PAGE_SHIFT;

	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_needed;

	DBG_RES("pcxl_alloc_range() size: %zu pages_needed %d pages_mask 0x%08lx\n",
		size, pages_needed, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if(pages_needed <= 8) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 8);
	} else if(pages_needed <= 16) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 16);
	} else if(pages_needed <= 32) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 32);
	} else {
		panic("%s: pcxl_alloc_range() Too many pages to map.\n",
		      __FILE__);
	}

	dump_resmap();
	panic("%s: pcxl_alloc_range() out of dma mapping resources\n",
	      __FILE__);

resource_found:

	DBG_RES("pcxl_alloc_range() res_idx %d mask 0x%08lx res_hint: %d\n",
		res_idx, mask, pcxl_res_hint);

	pcxl_used_pages += pages_needed;
	pcxl_used_bytes += ((pages_needed >> 3) ? (pages_needed >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();

	/*
	** return the corresponding vaddr in the pcxl dma map
	*/
	return (pcxl_dma_start + (res_idx << (PAGE_SHIFT + 3)));
}

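/*
** Freeing reverses the allocation: res_idx recovers the map byte from
** the vaddr ((vaddr - pcxl_dma_start) >> (PAGE_SHIFT + 3)) and the
** same n-page mask is cleared from the map.  Note the extra offset
** term in PCXL_FREE_MAPPINGS is of the form x & ~x, which is always
** 0, so the mask is simply cleared at pcxl_res_map[idx].
*/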
#define PCXL_FREE_MAPPINGS(idx, m, size) \
		u##size *res_ptr = (u##size *)&(pcxl_res_map[(idx) + (((size >> 3) - 1) & (~((size >> 3) - 1)))]); \
		/* BUG_ON((*res_ptr & m) != m); */ \
		*res_ptr &= ~m;

/*
** clear bits in the pcxl resource map
*/
static void
pcxl_free_range(unsigned long vaddr, size_t size)
{
	u_long mask, flags;
	unsigned int res_idx = (vaddr - pcxl_dma_start) >> (PAGE_SHIFT + 3);
	unsigned int pages_mapped = size >> PAGE_SHIFT;

	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_mapped;

	DBG_RES("pcxl_free_range() res_idx: %d size: %zu pages_mapped %d mask 0x%08lx\n",
		res_idx, size, pages_mapped, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if(pages_mapped <= 8) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 8);
	} else if(pages_mapped <= 16) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 16);
	} else if(pages_mapped <= 32) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 32);
	} else {
		panic("%s: pcxl_free_range() Too many pages to unmap.\n",
		      __FILE__);
	}

	pcxl_used_pages -= (pages_mapped ? pages_mapped : 1);
	pcxl_used_bytes -= ((pages_mapped >> 3) ? (pages_mapped >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();
}

static int proc_pcxl_dma_show(struct seq_file *m, void *v)
{
#if 0
	u_long i = 0;
	unsigned long *res_ptr = (u_long *)pcxl_res_map;
#endif
	unsigned long total_pages = pcxl_res_size << 3;   /* 8 bits per byte */

	seq_printf(m, "\nDMA Mapping Area size    : %d bytes (%ld pages)\n",
		PCXL_DMA_MAP_SIZE, total_pages);

	seq_printf(m, "Resource bitmap : %d bytes\n", pcxl_res_size);

	seq_puts(m,  "            total:    free:    used:   % used:\n");
	seq_printf(m, "blocks  %8d %8ld %8ld %8ld%%\n", pcxl_res_size,
		pcxl_res_size - pcxl_used_bytes, pcxl_used_bytes,
		(pcxl_used_bytes * 100) / pcxl_res_size);

	seq_printf(m, "pages   %8ld %8ld %8ld %8ld%%\n", total_pages,
		total_pages - pcxl_used_pages, pcxl_used_pages,
		(pcxl_used_pages * 100 / total_pages));

#if 0
	seq_puts(m, "\nResource bitmap:");

	for(; i < (pcxl_res_size / sizeof(u_long)); ++i, ++res_ptr) {
		if ((i & 7) == 0)
		    seq_puts(m,"\n   ");
		seq_printf(m, " %08lx", *res_ptr);
	}
#endif
	seq_putc(m, '\n');
	return 0;
}

static int proc_pcxl_dma_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_pcxl_dma_show, NULL);
}

static const struct file_operations proc_pcxl_dma_ops = {
	.owner		= THIS_MODULE,
	.open		= proc_pcxl_dma_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

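/*
** One-time setup: pcxl_res_size = PCXL_DMA_MAP_SIZE >> (PAGE_SHIFT + 3)
** allocates exactly one bitmap bit per page of the mapping window, and
** /proc/gsc/pcxl_dma exposes the usage statistics printed above.
*/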
static int __init
pcxl_dma_init(void)
{
	if (pcxl_dma_start == 0)
		return 0;

	pcxl_res_size = PCXL_DMA_MAP_SIZE >> (PAGE_SHIFT + 3);
	pcxl_res_hint = 0;
	pcxl_res_map = (char *)__get_free_pages(GFP_KERNEL,
					    get_order(pcxl_res_size));
	if (!pcxl_res_map)
		panic("%s: unable to allocate pcxl resource map\n", __FILE__);
	memset(pcxl_res_map, 0, pcxl_res_size);
	proc_gsc_root = proc_mkdir("gsc", NULL);
	if (!proc_gsc_root)
		printk(KERN_WARNING
			"pcxl_dma_init: Unable to create gsc /proc dir entry\n");
	else {
		struct proc_dir_entry *ent;
		ent = proc_create("pcxl_dma", 0, proc_gsc_root,
				  &proc_pcxl_dma_ops);
		if (!ent)
			printk(KERN_WARNING
				"pci-dma.c: Unable to create pcxl_dma /proc entry.\n");
	}
	return 0;
}

__initcall(pcxl_dma_init);

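/*
** Coherent allocations on PCXL: grab ordinary cacheable pages, flush
** them out of the data cache, then alias them uncached inside the
** pcxl window.  The caller gets the uncached virtual address, and
** *dma_handle is the physical address, which these non-IOMMU
** platforms use directly as the bus address.
*/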
static void *pa11_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	unsigned long vaddr;
	unsigned long paddr;
	int order;

	order = get_order(size);
	size = 1 << (order + PAGE_SHIFT);
	vaddr = pcxl_alloc_range(size);
	paddr = __get_free_pages(flag, order);
	/* paddr still holds the kernel virtual address at this point */
	flush_kernel_dcache_range(paddr, size);
	paddr = __pa(paddr);
	map_uncached_pages(vaddr, size, paddr);
	*dma_handle = (dma_addr_t) paddr;

#if 0
/* This probably isn't needed to support EISA cards.
** ISA cards will certainly only support 24-bit DMA addressing.
** Not clear if we can, want, or need to support ISA.
*/
	if (!dev || dev->coherent_dma_mask < 0xffffffff)
		flag |= GFP_DMA;
#endif
	return (void *)vaddr;
}

static void pa11_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	int order;

	order = get_order(size);
	size = 1 << (order + PAGE_SHIFT);
	unmap_uncached_pages((unsigned long)vaddr, size);
	pcxl_free_range((unsigned long)vaddr, size);
	free_pages((unsigned long)__va(dma_handle), order);
}

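/*
** Streaming mappings: with no IOMMU the DMA address is just the
** physical address, so map/unmap reduce to cache maintenance.  On
** PA-RISC a data-cache flush both writes back dirty lines and
** invalidates them, which covers either transfer direction;
** DMA_ATTR_SKIP_CPU_SYNC lets callers who manage caches themselves
** skip it.
*/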
static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction direction, unsigned long attrs)
{
	void *addr = page_address(page) + offset;
	BUG_ON(direction == DMA_NONE);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		flush_kernel_dcache_range((unsigned long) addr, size);

	return virt_to_phys(addr);
}

static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction direction,
		unsigned long attrs)
{
	BUG_ON(direction == DMA_NONE);

	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
		return;

	if (direction == DMA_TO_DEVICE)
		return;

	/*
	 * For PCI_DMA_FROMDEVICE this flush is not necessary for the
	 * simple map/unmap case. However, it IS necessary if
	 * pci_dma_sync_single_* has been called and the buffer reused.
	 */

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size);
}

static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist,
		int nents, enum dma_data_direction direction,
		unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sglist, sg, nents, i) {
		unsigned long vaddr = (unsigned long)sg_virt(sg);

		sg_dma_address(sg) = (dma_addr_t) virt_to_phys(vaddr);
		sg_dma_len(sg) = sg->length;

		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
			continue;

		flush_kernel_dcache_range(vaddr, sg->length);
	}
	return nents;
}

static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
		int nents, enum dma_data_direction direction,
		unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
		return;

	if (direction == DMA_TO_DEVICE)
		return;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for_each_sg(sglist, sg, nents, i)
		flush_kernel_vmap_range(sg_virt(sg), sg->length);
}

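/*
** The sync_* callbacks are identical for the to-CPU and to-device
** directions: the same flush both pushes dirty lines out ahead of a
** device read and discards stale lines ahead of a CPU read.
*/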
static void pa11_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
			size);
}

static void pa11_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
			size);
}

static void pa11_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sglist, int nents,
		enum dma_data_direction direction)
{
	int i;
	struct scatterlist *sg;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for_each_sg(sglist, sg, nents, i)
		flush_kernel_vmap_range(sg_virt(sg), sg->length);
}

static void pa11_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sglist, int nents,
		enum dma_data_direction direction)
{
	int i;
	struct scatterlist *sg;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for_each_sg(sglist, sg, nents, i)
		flush_kernel_vmap_range(sg_virt(sg), sg->length);
}

static void pa11_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	flush_kernel_dcache_range((unsigned long)vaddr, size);
}

const struct dma_map_ops pcxl_dma_ops = {
	.alloc =		pa11_dma_alloc,
	.free =			pa11_dma_free,
	.map_page =		pa11_dma_map_page,
	.unmap_page =		pa11_dma_unmap_page,
	.map_sg =		pa11_dma_map_sg,
	.unmap_sg =		pa11_dma_unmap_sg,
	.sync_single_for_cpu =	pa11_dma_sync_single_for_cpu,
	.sync_single_for_device = pa11_dma_sync_single_for_device,
	.sync_sg_for_cpu =	pa11_dma_sync_sg_for_cpu,
	.sync_sg_for_device =	pa11_dma_sync_sg_for_device,
	.cache_sync =		pa11_dma_cache_sync,
};

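/*
** PCX (non-PCXL) variant: these machines cannot map pages uncached
** (per the header comment, only PA7100LC/PA7300LC support that), so
** true coherent allocations are refused.  Only callers passing
** DMA_ATTR_NON_CONSISTENT get memory, and they must bracket device
** access with dma_cache_sync() themselves.
*/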
static void *pcx_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	void *addr;

	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0)
		return NULL;

	addr = (void *)__get_free_pages(flag, get_order(size));
	if (addr)
		*dma_handle = (dma_addr_t)virt_to_phys(addr);

	return addr;
}

static void pcx_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t iova, unsigned long attrs)
{
	free_pages((unsigned long)vaddr, get_order(size));
}

const struct dma_map_ops pcx_dma_ops = {
	.alloc =		pcx_dma_alloc,
	.free =			pcx_dma_free,
	.map_page =		pa11_dma_map_page,
	.unmap_page =		pa11_dma_unmap_page,
	.map_sg =		pa11_dma_map_sg,
	.unmap_sg =		pa11_dma_unmap_sg,
	.sync_single_for_cpu =	pa11_dma_sync_single_for_cpu,
	.sync_single_for_device = pa11_dma_sync_single_for_device,
	.sync_sg_for_cpu =	pa11_dma_sync_sg_for_cpu,
	.sync_sg_for_device =	pa11_dma_sync_sg_for_device,
	.cache_sync =		pa11_dma_cache_sync,
};