// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 */


#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <linux/hash.h>
#include <linux/fault-inject.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>
#include <asm/fadump.h>
#include <asm/vio.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

#define DBG(...)

#ifdef CONFIG_IOMMU_DEBUGFS
static int iommu_debugfs_weight_get(void *data, u64 *val)
{
        struct iommu_table *tbl = data;
        *val = bitmap_weight(tbl->it_map, tbl->it_size);
        return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(iommu_debugfs_fops_weight, iommu_debugfs_weight_get, NULL, "%llu\n");

static void iommu_debugfs_add(struct iommu_table *tbl)
{
        char name[10];
        struct dentry *liobn_entry;

        sprintf(name, "%08lx", tbl->it_index);
        liobn_entry = debugfs_create_dir(name, iommu_debugfs_dir);

        debugfs_create_file_unsafe("weight", 0400, liobn_entry, tbl, &iommu_debugfs_fops_weight);
        debugfs_create_ulong("it_size", 0400, liobn_entry, &tbl->it_size);
        debugfs_create_ulong("it_page_shift", 0400, liobn_entry, &tbl->it_page_shift);
        debugfs_create_ulong("it_reserved_start", 0400, liobn_entry, &tbl->it_reserved_start);
        debugfs_create_ulong("it_reserved_end", 0400, liobn_entry, &tbl->it_reserved_end);
        debugfs_create_ulong("it_indirect_levels", 0400, liobn_entry, &tbl->it_indirect_levels);
        debugfs_create_ulong("it_level_size", 0400, liobn_entry, &tbl->it_level_size);
}

static void iommu_debugfs_del(struct iommu_table *tbl)
{
        char name[10];
        struct dentry *liobn_entry;

        sprintf(name, "%08lx", tbl->it_index);
        liobn_entry = debugfs_lookup(name, iommu_debugfs_dir);
        debugfs_remove(liobn_entry);
}
#else
static void iommu_debugfs_add(struct iommu_table *tbl){}
static void iommu_debugfs_del(struct iommu_table *tbl){}
#endif

static int novmerge;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);

static int __init setup_iommu(char *str)
{
        if (!strcmp(str, "novmerge"))
                novmerge = 1;
        else if (!strcmp(str, "vmerge"))
                novmerge = 0;
        return 1;
}

__setup("iommu=", setup_iommu);
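/*
 * Usage note: virtual merging of scatterlist entries is enabled by default
 * (novmerge == 0); booting with "iommu=novmerge" disables it, and
 * "iommu=vmerge" explicitly re-enables it.
 */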

static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);

/*
 * We precalculate the hash to avoid doing it on every allocation.
 *
 * The hash is important to spread CPUs across all the pools. For example,
 * on a POWER7 with 4-way SMT we want interrupts on the primary threads, and
 * with 4 pools a simple modulo mapping would put all primary threads in the
 * same pool.
 */
static int __init setup_iommu_pool_hash(void)
{
        unsigned int i;

        for_each_possible_cpu(i)
                per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);

        return 0;
}
subsys_initcall(setup_iommu_pool_hash);
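/*
 * Illustration (assumes IOMMU_POOL_HASHBITS == 2, i.e. up to 4 pools): a
 * plain "cpu % 4" mapping would place every 4-way-SMT primary thread
 * (CPUs 0, 4, 8, ...) in pool 0, while hash_32() scrambles the CPU number
 * so those threads end up spread across all four pools.
 */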

#ifdef CONFIG_FAIL_IOMMU

static DECLARE_FAULT_ATTR(fail_iommu);

static int __init setup_fail_iommu(char *str)
{
        return setup_fault_attr(&fail_iommu, str);
}
__setup("fail_iommu=", setup_fail_iommu);
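/*
 * setup_fault_attr() accepts the common fault-injection format, i.e.
 * fail_iommu=<interval>,<probability>,<space>,<times>; see
 * Documentation/fault-injection/fault-injection.rst for the semantics.
 */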

static bool should_fail_iommu(struct device *dev)
{
        return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
}

static int __init fail_iommu_debugfs(void)
{
        struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
                                                       NULL, &fail_iommu);

        return PTR_ERR_OR_ZERO(dir);
}
late_initcall(fail_iommu_debugfs);

static ssize_t fail_iommu_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
}

static ssize_t fail_iommu_store(struct device *dev,
                                struct device_attribute *attr, const char *buf,
                                size_t count)
{
        int i;

        if (count > 0 && sscanf(buf, "%d", &i) > 0)
                dev->archdata.fail_iommu = (i == 0) ? 0 : 1;

        return count;
}

static DEVICE_ATTR_RW(fail_iommu);
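/*
 * Per-device fault injection is then toggled through sysfs, e.g. (the
 * device path below is only an example):
 *   echo 1 > /sys/bus/pci/devices/0000:00:01.0/fail_iommu
 */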

static int fail_iommu_bus_notify(struct notifier_block *nb,
                                 unsigned long action, void *data)
{
        struct device *dev = data;

        if (action == BUS_NOTIFY_ADD_DEVICE) {
                if (device_create_file(dev, &dev_attr_fail_iommu))
                        pr_warn("Unable to create IOMMU fault injection sysfs entries\n");
        } else if (action == BUS_NOTIFY_DEL_DEVICE) {
                device_remove_file(dev, &dev_attr_fail_iommu);
        }

        return 0;
}

static struct notifier_block fail_iommu_bus_notifier = {
        .notifier_call = fail_iommu_bus_notify
};

static int __init fail_iommu_setup(void)
{
#ifdef CONFIG_PCI
        bus_register_notifier(&pci_bus_type, &fail_iommu_bus_notifier);
#endif
#ifdef CONFIG_IBMVIO
        bus_register_notifier(&vio_bus_type, &fail_iommu_bus_notifier);
#endif

        return 0;
}
/*
 * Must execute after the PCI and VIO subsystems have initialised but
 * before devices are probed.
 */
arch_initcall(fail_iommu_setup);
#else
static inline bool should_fail_iommu(struct device *dev)
{
        return false;
}
#endif

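/*
 * Overview of the search below: pass 0 starts from the hint (or the
 * caller's handle) in the pool picked by the per-CPU hash; pass 1 rescans
 * that pool from its start; later passes walk the remaining pools in turn
 * and, as a last resort, the large pool, before failing.
 */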
static unsigned long iommu_range_alloc(struct device *dev,
                                       struct iommu_table *tbl,
                                       unsigned long npages,
                                       unsigned long *handle,
                                       unsigned long mask,
                                       unsigned int align_order)
{
        unsigned long n, end, start;
        unsigned long limit;
        int largealloc = npages > 15;
        int pass = 0;
        unsigned long align_mask;
        unsigned long flags;
        unsigned int pool_nr;
        struct iommu_pool *pool;

        align_mask = (1ull << align_order) - 1;

        /* This allocator was derived from x86_64's bit string search */

        /* Sanity check */
        if (unlikely(npages == 0)) {
                if (printk_ratelimit())
                        WARN_ON(1);
                return DMA_MAPPING_ERROR;
        }

        if (should_fail_iommu(dev))
                return DMA_MAPPING_ERROR;

        /*
         * We don't need to disable preemption here because any CPU can
         * safely use any IOMMU pool.
         */
        pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);

        if (largealloc)
                pool = &(tbl->large_pool);
        else
                pool = &(tbl->pools[pool_nr]);

        spin_lock_irqsave(&(pool->lock), flags);

again:
        if ((pass == 0) && handle && *handle &&
            (*handle >= pool->start) && (*handle < pool->end))
                start = *handle;
        else
                start = pool->hint;

        limit = pool->end;

        /* The case below can happen if we have a small segment appended
         * to a large one, or when the previous alloc was at the very end of
         * the available space. If so, go back to the initial start.
         */
        if (start >= limit)
                start = pool->start;

        if (limit + tbl->it_offset > mask) {
                limit = mask - tbl->it_offset + 1;
                /* If we're constrained on address range, first try
                 * at the masked hint to avoid O(n) search complexity,
                 * but on second pass, start at 0 in pool 0.
                 */
                if ((start & mask) >= limit || pass > 0) {
                        spin_unlock(&(pool->lock));
                        pool = &(tbl->pools[0]);
                        spin_lock(&(pool->lock));
                        start = pool->start;
                } else {
                        start &= mask;
                }
        }

        n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
                        dma_get_seg_boundary_nr_pages(dev, tbl->it_page_shift),
                        align_mask);
        if (n == -1) {
                if (likely(pass == 0)) {
                        /* First try the pool from the start */
                        pool->hint = pool->start;
                        pass++;
                        goto again;

                } else if (pass <= tbl->nr_pools) {
                        /* Now try scanning all the other pools */
                        spin_unlock(&(pool->lock));
                        pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
                        pool = &tbl->pools[pool_nr];
                        spin_lock(&(pool->lock));
                        pool->hint = pool->start;
                        pass++;
                        goto again;

                } else if (pass == tbl->nr_pools + 1) {
                        /* Last resort: try largepool */
                        spin_unlock(&pool->lock);
                        pool = &tbl->large_pool;
                        spin_lock(&pool->lock);
                        pool->hint = pool->start;
                        pass++;
                        goto again;

                } else {
                        /* Give up */
                        spin_unlock_irqrestore(&(pool->lock), flags);
                        return DMA_MAPPING_ERROR;
                }
        }

        end = n + npages;

        /* Bump the hint to a new block for small allocs. */
        if (largealloc) {
                /* Don't bump to new block to avoid fragmentation */
                pool->hint = end;
        } else {
                /* Overflow will be taken care of at the next allocation */
                pool->hint = (end + tbl->it_blocksize - 1) &
                                ~(tbl->it_blocksize - 1);
        }

        /* Update handle for SG allocations */
        if (handle)
                *handle = end;

        spin_unlock_irqrestore(&(pool->lock), flags);

        return n;
}

static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
                              void *page, unsigned int npages,
                              enum dma_data_direction direction,
                              unsigned long mask, unsigned int align_order,
                              unsigned long attrs)
{
        unsigned long entry;
        dma_addr_t ret = DMA_MAPPING_ERROR;
        int build_fail;

        entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

        if (unlikely(entry == DMA_MAPPING_ERROR))
                return DMA_MAPPING_ERROR;

        entry += tbl->it_offset;        /* Offset into real TCE table */
        ret = entry << tbl->it_page_shift;      /* Set the return dma address */

        /* Put the TCEs in the HW table */
        build_fail = tbl->it_ops->set(tbl, entry, npages,
                                      (unsigned long)page &
                                      IOMMU_PAGE_MASK(tbl), direction, attrs);

        /* tbl->it_ops->set() only returns non-zero for transient errors.
         * Clean up the table bitmap in this case and return
         * DMA_MAPPING_ERROR. For all other errors the functionality is
         * not altered.
         */
        if (unlikely(build_fail)) {
                __iommu_free(tbl, ret, npages);
                return DMA_MAPPING_ERROR;
        }

        /* Flush/invalidate TLB caches if necessary */
        if (tbl->it_ops->flush)
                tbl->it_ops->flush(tbl);

        /* Make sure updates are seen by hardware */
        mb();

        return ret;
}
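/*
 * Worked example (assuming a 4K IOMMU page size, it_page_shift == 12):
 * if iommu_range_alloc() returns bitmap index 0x40 and it_offset is 0,
 * the returned DMA address is 0x40 << 12 == 0x40000; the caller then ORs
 * in the sub-page offset of the buffer.
 */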

static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
                             unsigned int npages)
{
        unsigned long entry, free_entry;

        entry = dma_addr >> tbl->it_page_shift;
        free_entry = entry - tbl->it_offset;

        if (((free_entry + npages) > tbl->it_size) ||
            (entry < tbl->it_offset)) {
                if (printk_ratelimit()) {
                        printk(KERN_INFO "iommu_free: invalid entry\n");
                        printk(KERN_INFO "\tentry     = 0x%lx\n", entry);
                        printk(KERN_INFO "\tdma_addr  = 0x%llx\n", (u64)dma_addr);
                        printk(KERN_INFO "\tTable     = 0x%llx\n", (u64)tbl);
                        printk(KERN_INFO "\tbus#      = 0x%llx\n", (u64)tbl->it_busno);
                        printk(KERN_INFO "\tsize      = 0x%llx\n", (u64)tbl->it_size);
                        printk(KERN_INFO "\tstartOff  = 0x%llx\n", (u64)tbl->it_offset);
                        printk(KERN_INFO "\tindex     = 0x%llx\n", (u64)tbl->it_index);
                        WARN_ON(1);
                }

                return false;
        }

        return true;
}

static struct iommu_pool *get_pool(struct iommu_table *tbl,
                                   unsigned long entry)
{
        struct iommu_pool *p;
        unsigned long largepool_start = tbl->large_pool.start;

        /* The large pool is the last pool at the top of the table */
        if (entry >= largepool_start) {
                p = &tbl->large_pool;
        } else {
                unsigned int pool_nr = entry / tbl->poolsize;

                BUG_ON(pool_nr > tbl->nr_pools);
                p = &tbl->pools[pool_nr];
        }

        return p;
}
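/*
 * Pool layout sketch (numbers are illustrative): with it_size == 0x40000
 * entries and 4 pools, poolsize == (0x40000 * 3 / 4) / 4 == 0xC000, so the
 * small pools cover [0, 0x30000) in 0xC000-entry slices and the large pool
 * covers the top quarter, [0x30000, 0x40000).
 */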

static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
                         unsigned int npages)
{
        unsigned long entry, free_entry;
        unsigned long flags;
        struct iommu_pool *pool;

        entry = dma_addr >> tbl->it_page_shift;
        free_entry = entry - tbl->it_offset;

        pool = get_pool(tbl, free_entry);

        if (!iommu_free_check(tbl, dma_addr, npages))
                return;

        tbl->it_ops->clear(tbl, entry, npages);

        spin_lock_irqsave(&(pool->lock), flags);
        bitmap_clear(tbl->it_map, free_entry, npages);
        spin_unlock_irqrestore(&(pool->lock), flags);
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
                unsigned int npages)
{
        __iommu_free(tbl, dma_addr, npages);

        /* Make sure TLB cache is flushed if the HW needs it. We do
         * not do an mb() here on purpose, it is not needed on any of
         * the current platforms.
         */
        if (tbl->it_ops->flush)
                tbl->it_ops->flush(tbl);
}

int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
                     struct scatterlist *sglist, int nelems,
                     unsigned long mask, enum dma_data_direction direction,
                     unsigned long attrs)
{
        dma_addr_t dma_next = 0, dma_addr;
        struct scatterlist *s, *outs, *segstart;
        int outcount, incount, i, build_fail = 0;
        unsigned int align;
        unsigned long handle;
        unsigned int max_seg_size;

        BUG_ON(direction == DMA_NONE);

        if ((nelems == 0) || !tbl)
                return 0;

        outs = s = segstart = &sglist[0];
        outcount = 1;
        incount = nelems;
        handle = 0;

        /* Init first segment length for backout at failure */
        outs->dma_length = 0;

        DBG("sg mapping %d elements:\n", nelems);

        max_seg_size = dma_get_max_seg_size(dev);
        for_each_sg(sglist, s, nelems, i) {
                unsigned long vaddr, npages, entry, slen;

                slen = s->length;
                /* Sanity check */
                if (slen == 0) {
                        dma_next = 0;
                        continue;
                }
                /* Allocate iommu entries for that segment */
                vaddr = (unsigned long) sg_virt(s);
                npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
                align = 0;
                if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
                    (vaddr & ~PAGE_MASK) == 0)
                        align = PAGE_SHIFT - tbl->it_page_shift;
                entry = iommu_range_alloc(dev, tbl, npages, &handle,
                                          mask >> tbl->it_page_shift, align);

                DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

                /* Handle failure */
                if (unlikely(entry == DMA_MAPPING_ERROR)) {
                        if (!(attrs & DMA_ATTR_NO_WARN) &&
                            printk_ratelimit())
                                dev_info(dev, "iommu_alloc failed, tbl %p vaddr %lx npages %lu\n",
                                         tbl, vaddr, npages);
                        goto failure;
                }

                /* Convert entry to a dma_addr_t */
                entry += tbl->it_offset;
                dma_addr = entry << tbl->it_page_shift;
                dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));

                DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
                            npages, entry, dma_addr);

                /* Insert into HW table */
                build_fail = tbl->it_ops->set(tbl, entry, npages,
                                              vaddr & IOMMU_PAGE_MASK(tbl),
                                              direction, attrs);
                if (unlikely(build_fail))
                        goto failure;

                /* If we are in an open segment, try merging */
                if (segstart != s) {
                        DBG("  - trying merge...\n");
                        /* We cannot merge if:
                         * - virtual merging is disabled (novmerge),
                         * - the allocated dma_addr isn't contiguous with the
                         *   previous allocation, or
                         * - merging would exceed the device's max segment size.
                         */
                        if (novmerge || (dma_addr != dma_next) ||
                            (outs->dma_length + s->length > max_seg_size)) {
                                /* Can't merge: create a new segment */
                                segstart = s;
                                outcount++;
                                outs = sg_next(outs);
                                DBG("    can't merge, new segment.\n");
                        } else {
                                outs->dma_length += s->length;
                                DBG("    merged, new len: %u\n", outs->dma_length);
                        }
                }

                if (segstart == s) {
                        /* This is a new segment, fill entries */
                        DBG("  - filling new segment.\n");
                        outs->dma_address = dma_addr;
                        outs->dma_length = slen;
                }

                /* Calculate next page pointer for contiguous check */
                dma_next = dma_addr + slen;

                DBG("  - dma next is: %lx\n", dma_next);
        }

        /* Flush/invalidate TLB caches if necessary */
        if (tbl->it_ops->flush)
                tbl->it_ops->flush(tbl);

        DBG("mapped %d elements:\n", outcount);

        /* For the sake of ppc_iommu_unmap_sg, we clear out the length in the
         * next entry of the sglist if we didn't fill the list completely
         */
        if (outcount < incount) {
                outs = sg_next(outs);
                outs->dma_address = DMA_MAPPING_ERROR;
                outs->dma_length = 0;
        }

        /* Make sure updates are seen by hardware */
        mb();

        return outcount;

 failure:
        for_each_sg(sglist, s, nelems, i) {
                if (s->dma_length != 0) {
                        unsigned long vaddr, npages;

                        vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
                        npages = iommu_num_pages(s->dma_address, s->dma_length,
                                                 IOMMU_PAGE_SIZE(tbl));
                        __iommu_free(tbl, vaddr, npages);
                        s->dma_address = DMA_MAPPING_ERROR;
                        s->dma_length = 0;
                }
                if (s == outs)
                        break;
        }
        return 0;
}
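/*
 * Merging example (illustrative addresses): if segment A maps to DMA range
 * [0x10000, 0x12000) and the next segment B happens to be allocated at
 * 0x12000, B is folded into A's dma_length instead of consuming another
 * output segment, provided novmerge is off and the combined length stays
 * within dma_get_max_seg_size(dev).
 */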


void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
                        int nelems, enum dma_data_direction direction,
                        unsigned long attrs)
{
        struct scatterlist *sg;

        BUG_ON(direction == DMA_NONE);

        if (!tbl)
                return;

        sg = sglist;
        while (nelems--) {
                unsigned int npages;
                dma_addr_t dma_handle = sg->dma_address;

                if (sg->dma_length == 0)
                        break;
                npages = iommu_num_pages(dma_handle, sg->dma_length,
                                         IOMMU_PAGE_SIZE(tbl));
                __iommu_free(tbl, dma_handle, npages);
                sg = sg_next(sg);
        }

        /* Flush/invalidate TLBs if necessary. As for iommu_free(), we
         * do not do an mb() here, the affected platforms do not need it
         * when freeing.
         */
        if (tbl->it_ops->flush)
                tbl->it_ops->flush(tbl);
}

static void iommu_table_clear(struct iommu_table *tbl)
{
        /*
         * In the case of firmware assisted dump, the system goes through a
         * clean reboot process at the time of the crash. Hence it's safe to
         * clear the TCE entries if firmware assisted dump is active.
         */
        if (!is_kdump_kernel() || is_fadump_active()) {
                /* Clear the table in case firmware left allocations in it */
                tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
                return;
        }

#ifdef CONFIG_CRASH_DUMP
        if (tbl->it_ops->get) {
                unsigned long index, tceval, tcecount = 0;

                /* Reserve the existing mappings left by the first kernel. */
                for (index = 0; index < tbl->it_size; index++) {
                        tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
                        /*
                         * A freed TCE entry contains 0x7fffffffffffffff on JS20
                         */
                        if (tceval && (tceval != 0x7fffffffffffffffUL)) {
                                __set_bit(index, tbl->it_map);
                                tcecount++;
                        }
                }

                if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
                        printk(KERN_WARNING "TCE table is full; freeing %d entries for the kdump boot\n",
                                KDUMP_MIN_TCE_ENTRIES);
                        for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
                                index < tbl->it_size; index++)
                                __clear_bit(index, tbl->it_map);
                }
        }
#endif
}

static void iommu_table_reserve_pages(struct iommu_table *tbl,
                unsigned long res_start, unsigned long res_end)
{
        int i;

        WARN_ON_ONCE(res_end < res_start);
        /*
         * Reserve page 0 so it will not be used for any mappings.
         * This protects us from buggy drivers that treat page 0 as
         * invalid, which could otherwise crash the machine or even
         * lose data.
         */
        if (tbl->it_offset == 0)
                set_bit(0, tbl->it_map);

        tbl->it_reserved_start = res_start;
        tbl->it_reserved_end = res_end;

        /* Check if res_start..res_end isn't empty and overlaps the table */
        if (res_start && res_end &&
                        (tbl->it_offset + tbl->it_size < res_start ||
                         res_end < tbl->it_offset))
                return;

        for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
                set_bit(i - tbl->it_offset, tbl->it_map);
}

static void iommu_table_release_pages(struct iommu_table *tbl)
{
        int i;

        /*
         * In case we have reserved the first bit, we should not emit
         * the warning below.
         */
        if (tbl->it_offset == 0)
                clear_bit(0, tbl->it_map);

        for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
                clear_bit(i - tbl->it_offset, tbl->it_map);
}

/*
 * Build an iommu_table structure.  This contains a bit map which
 * is used to manage allocation of the tce space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
                unsigned long res_start, unsigned long res_end)
{
        unsigned long sz;
        static int welcomed = 0;
        unsigned int i;
        struct iommu_pool *p;

        BUG_ON(!tbl->it_ops);

        /* number of bytes needed for the bitmap */
        sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

        tbl->it_map = vzalloc_node(sz, nid);
        if (!tbl->it_map) {
                pr_err("%s: Can't allocate %ld bytes\n", __func__, sz);
                return NULL;
        }

        iommu_table_reserve_pages(tbl, res_start, res_end);

        /* We only split the IOMMU table if we have 1GB or more of space */
        if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
                tbl->nr_pools = IOMMU_NR_POOLS;
        else
                tbl->nr_pools = 1;

        /* We reserve the top 1/4 of the table for large allocations */
        tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;

        for (i = 0; i < tbl->nr_pools; i++) {
                p = &tbl->pools[i];
                spin_lock_init(&(p->lock));
                p->start = tbl->poolsize * i;
                p->hint = p->start;
                p->end = p->start + tbl->poolsize;
        }

        p = &tbl->large_pool;
        spin_lock_init(&(p->lock));
        p->start = tbl->poolsize * i;
        p->hint = p->start;
        p->end = tbl->it_size;

        iommu_table_clear(tbl);

        if (!welcomed) {
                printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
                       novmerge ? "disabled" : "enabled");
                welcomed = 1;
        }

        iommu_debugfs_add(tbl);

        return tbl;
}
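/*
 * Sizing example (assumes IOMMU_NR_POOLS == 4): a 1GB DMA window with 4K
 * IOMMU pages gives it_size == 0x40000 entries, so the table is split into
 * 4 small pools of 0xC000 entries each plus a large pool holding the top
 * quarter; a smaller window keeps a single pool covering 3/4 of the table.
 */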

static void iommu_table_free(struct kref *kref)
{
        struct iommu_table *tbl;

        tbl = container_of(kref, struct iommu_table, it_kref);

        if (tbl->it_ops->free)
                tbl->it_ops->free(tbl);

        if (!tbl->it_map) {
                kfree(tbl);
                return;
        }

        iommu_debugfs_del(tbl);

        iommu_table_release_pages(tbl);

        /* verify that table contains no entries */
        if (!bitmap_empty(tbl->it_map, tbl->it_size))
                pr_warn("%s: Unexpected TCEs\n", __func__);

        /* free bitmap */
        vfree(tbl->it_map);

        /* free table */
        kfree(tbl);
}

struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl)
{
        if (kref_get_unless_zero(&tbl->it_kref))
                return tbl;

        return NULL;
}
EXPORT_SYMBOL_GPL(iommu_tce_table_get);

int iommu_tce_table_put(struct iommu_table *tbl)
{
        if (WARN_ON(!tbl))
                return 0;

        return kref_put(&tbl->it_kref, iommu_table_free);
}
EXPORT_SYMBOL_GPL(iommu_tce_table_put);

/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
                          struct page *page, unsigned long offset, size_t size,
                          unsigned long mask, enum dma_data_direction direction,
                          unsigned long attrs)
{
        dma_addr_t dma_handle = DMA_MAPPING_ERROR;
        void *vaddr;
        unsigned long uaddr;
        unsigned int npages, align;

        BUG_ON(direction == DMA_NONE);

        vaddr = page_address(page) + offset;
        uaddr = (unsigned long)vaddr;

        if (tbl) {
                npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
                align = 0;
                if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
                    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
                        align = PAGE_SHIFT - tbl->it_page_shift;

                dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
                                         mask >> tbl->it_page_shift, align,
                                         attrs);
                if (dma_handle == DMA_MAPPING_ERROR) {
                        if (!(attrs & DMA_ATTR_NO_WARN) &&
                            printk_ratelimit()) {
                                dev_info(dev, "iommu_alloc failed, tbl %p vaddr %p npages %d\n",
                                         tbl, vaddr, npages);
                        }
                } else
                        dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
        }

        return dma_handle;
}

void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
                      size_t size, enum dma_data_direction direction,
                      unsigned long attrs)
{
        unsigned int npages;

        BUG_ON(direction == DMA_NONE);

        if (tbl) {
                npages = iommu_num_pages(dma_handle, size,
                                         IOMMU_PAGE_SIZE(tbl));
                iommu_free(tbl, dma_handle, npages);
        }
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
                           size_t size, dma_addr_t *dma_handle,
                           unsigned long mask, gfp_t flag, int node)
{
        void *ret = NULL;
        dma_addr_t mapping;
        unsigned int order;
        unsigned int nio_pages, io_order;
        struct page *page;

        size = PAGE_ALIGN(size);
        order = get_order(size);

        /*
         * Client asked for way too much space.  This is checked later
         * anyway.  It is easier to debug here for the drivers than in
         * the tce tables.
         */
        if (order >= IOMAP_MAX_ORDER) {
                dev_info(dev, "iommu_alloc_coherent size too large: 0x%lx\n",
                         size);
                return NULL;
        }

        if (!tbl)
                return NULL;

        /* Alloc enough pages (and possibly more) */
        page = alloc_pages_node(node, flag, order);
        if (!page)
                return NULL;
        ret = page_address(page);
        memset(ret, 0, size);

        /* Set up tces to cover the allocated range */
        nio_pages = size >> tbl->it_page_shift;
        io_order = get_iommu_order(size, tbl);
        mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
                              mask >> tbl->it_page_shift, io_order, 0);
        if (mapping == DMA_MAPPING_ERROR) {
                free_pages((unsigned long)ret, order);
                return NULL;
        }
        *dma_handle = mapping;
        return ret;
}
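/*
 * Example (assumes 64K kernel pages and a 4K IOMMU page size): a 64K
 * request allocates one kernel page (order 0) but needs 16 TCEs
 * (nio_pages == 0x10000 >> 12), allocated with io_order == 4 so the DMA
 * range is naturally aligned.
 */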

void iommu_free_coherent(struct iommu_table *tbl, size_t size,
                         void *vaddr, dma_addr_t dma_handle)
{
        if (tbl) {
                unsigned int nio_pages;

                size = PAGE_ALIGN(size);
                nio_pages = size >> tbl->it_page_shift;
                iommu_free(tbl, dma_handle, nio_pages);
                free_pages((unsigned long)vaddr, get_order(size));
        }
}

unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir)
{
        switch (dir) {
        case DMA_BIDIRECTIONAL:
                return TCE_PCI_READ | TCE_PCI_WRITE;
        case DMA_FROM_DEVICE:
                return TCE_PCI_WRITE;
        case DMA_TO_DEVICE:
                return TCE_PCI_READ;
        default:
                return 0;
        }
}
EXPORT_SYMBOL_GPL(iommu_direction_to_tce_perm);
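/*
 * Note the perspective: TCE_PCI_READ/WRITE describe what the *device* may
 * do to memory, so DMA_TO_DEVICE (device reads memory) maps to
 * TCE_PCI_READ and DMA_FROM_DEVICE (device writes memory) maps to
 * TCE_PCI_WRITE; iommu_tce_direction() below performs the inverse mapping.
 */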

#ifdef CONFIG_IOMMU_API
/*
 * SPAPR TCE API
 */
static void group_release(void *iommu_data)
{
        struct iommu_table_group *table_group = iommu_data;

        table_group->group = NULL;
}

void iommu_register_group(struct iommu_table_group *table_group,
                int pci_domain_number, unsigned long pe_num)
{
        struct iommu_group *grp;
        char *name;

        grp = iommu_group_alloc();
        if (IS_ERR(grp)) {
                pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
                                PTR_ERR(grp));
                return;
        }
        table_group->group = grp;
        iommu_group_set_iommudata(grp, table_group, group_release);
        name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
                        pci_domain_number, pe_num);
        if (!name)
                return;
        iommu_group_set_name(grp, name);
        kfree(name);
}

enum dma_data_direction iommu_tce_direction(unsigned long tce)
{
        if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
                return DMA_BIDIRECTIONAL;
        else if (tce & TCE_PCI_READ)
                return DMA_TO_DEVICE;
        else if (tce & TCE_PCI_WRITE)
                return DMA_FROM_DEVICE;
        else
                return DMA_NONE;
}
EXPORT_SYMBOL_GPL(iommu_tce_direction);

void iommu_flush_tce(struct iommu_table *tbl)
{
        /* Flush/invalidate TLB caches if necessary */
        if (tbl->it_ops->flush)
                tbl->it_ops->flush(tbl);

        /* Make sure updates are seen by hardware */
        mb();
}
EXPORT_SYMBOL_GPL(iommu_flush_tce);

int iommu_tce_check_ioba(unsigned long page_shift,
                unsigned long offset, unsigned long size,
                unsigned long ioba, unsigned long npages)
{
        unsigned long mask = (1UL << page_shift) - 1;

        if (ioba & mask)
                return -EINVAL;

        ioba >>= page_shift;
        if (ioba < offset)
                return -EINVAL;

        if ((ioba + 1) > (offset + size))
                return -EINVAL;

        return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_check_ioba);
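/*
 * Worked example (illustrative values): with page_shift == 12, an ioba of
 * 0x3000 passes the alignment check and becomes page number 3 after the
 * shift; for a window starting at offset 0 with size 0x100 pages, any
 * first-page number below 0x100 is accepted.
 */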

int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa)
{
        unsigned long mask = (1UL << page_shift) - 1;

        if (gpa & mask)
                return -EINVAL;

        return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);

long iommu_tce_xchg_no_kill(struct mm_struct *mm,
                struct iommu_table *tbl,
                unsigned long entry, unsigned long *hpa,
                enum dma_data_direction *direction)
{
        long ret;
        unsigned long size = 0;

        ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, false);
        if (!ret && ((*direction == DMA_FROM_DEVICE) ||
                        (*direction == DMA_BIDIRECTIONAL)) &&
                        !mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift,
                                        &size))
                SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_tce_xchg_no_kill);

void iommu_tce_kill(struct iommu_table *tbl,
                unsigned long entry, unsigned long pages)
{
        if (tbl->it_ops->tce_kill)
                tbl->it_ops->tce_kill(tbl, entry, pages, false);
}
EXPORT_SYMBOL_GPL(iommu_tce_kill);

int iommu_take_ownership(struct iommu_table *tbl)
{
        unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
        int ret = 0;

        /*
         * VFIO does not control TCE entries allocation and the guest
         * can write new TCEs on top of existing ones so iommu_tce_build()
         * must be able to release old pages. This functionality
         * requires an exchange callback (xchg_no_kill), so if it is not
         * implemented, we disallow taking ownership of the table.
         */
        if (!tbl->it_ops->xchg_no_kill)
                return -EINVAL;

        spin_lock_irqsave(&tbl->large_pool.lock, flags);
        for (i = 0; i < tbl->nr_pools; i++)
                spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);

        iommu_table_release_pages(tbl);

        if (!bitmap_empty(tbl->it_map, tbl->it_size)) {
                pr_err("iommu_tce: it_map is not empty");
                ret = -EBUSY;
                /* Undo iommu_table_release_pages, i.e. restore bit#0, etc */
                iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
                                tbl->it_reserved_end);
        } else {
                memset(tbl->it_map, 0xff, sz);
        }

        for (i = 0; i < tbl->nr_pools; i++)
                spin_unlock(&tbl->pools[i].lock);
        spin_unlock_irqrestore(&tbl->large_pool.lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_take_ownership);
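/*
 * Filling it_map with 0xff marks every entry as in use, so the kernel's
 * own DMA allocator cannot hand out TCEs while VFIO (or another external
 * user) owns the table; iommu_release_ownership() below clears the map and
 * re-reserves page 0 and the reserved range to undo this.
 */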

void iommu_release_ownership(struct iommu_table *tbl)
{
        unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;

        spin_lock_irqsave(&tbl->large_pool.lock, flags);
        for (i = 0; i < tbl->nr_pools; i++)
                spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);

        memset(tbl->it_map, 0, sz);

        iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
                        tbl->it_reserved_end);

        for (i = 0; i < tbl->nr_pools; i++)
                spin_unlock(&tbl->pools[i].lock);
        spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
}
EXPORT_SYMBOL_GPL(iommu_release_ownership);

int iommu_add_device(struct iommu_table_group *table_group, struct device *dev)
{
        /*
         * The sysfs entries should be populated before
         * binding the IOMMU group. If the sysfs entries aren't
         * ready, we simply bail.
         */
        if (!device_is_registered(dev))
                return -ENOENT;

        if (device_iommu_mapped(dev)) {
                pr_debug("%s: Skipping device %s with iommu group %d\n",
                         __func__, dev_name(dev),
                         iommu_group_id(dev->iommu_group));
                return -EBUSY;
        }

        pr_debug("%s: Adding %s to iommu group %d\n",
                 __func__, dev_name(dev), iommu_group_id(table_group->group));

        return iommu_group_add_device(table_group->group, dev);
}
EXPORT_SYMBOL_GPL(iommu_add_device);

void iommu_del_device(struct device *dev)
{
        /*
         * Some devices might not have an IOMMU table and group,
         * and we needn't detach them from the associated
         * IOMMU groups.
         */
        if (!device_iommu_mapped(dev)) {
                pr_debug("iommu_tce: skipping device %s with no tbl\n",
                         dev_name(dev));
                return;
        }

        iommu_group_remove_device(dev);
}
EXPORT_SYMBOL_GPL(iommu_del_device);
#endif /* CONFIG_IOMMU_API */