[SPARC64]: PCI-SUN4V fixes.
arch/sparc64/kernel/pci_sun4v.c

/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>

#include <asm/pbm.h>
#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include <asm/pstate.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

#define PGLIST_NENTS    2048

struct sun4v_pglist {
        u64     pglist[PGLIST_NENTS];
};

static DEFINE_PER_CPU(struct sun4v_pglist, iommu_pglists);

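/* First-fit allocator over the IOMMU TSB entry bitmap.  The search
 * starts at the rotating hint and makes at most one wrap-around pass
 * before giving up.  Returns the first allocated entry index, or -1
 * on failure.
 */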
static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages)
{
        unsigned long n, i, start, end, limit;
        int pass;

        limit = arena->limit;
        start = arena->hint;
        pass = 0;

again:
        n = find_next_zero_bit(arena->map, limit, start);
        end = n + npages;
        if (unlikely(end >= limit)) {
                if (likely(pass < 1)) {
                        limit = start;
                        start = 0;
                        pass++;
                        goto again;
                } else {
                        /* Scanned the whole thing, give up. */
                        return -1;
                }
        }

        for (i = n; i < end; i++) {
                if (test_bit(i, arena->map)) {
                        start = i + 1;
                        goto again;
                }
        }

        for (i = n; i < end; i++)
                __set_bit(i, arena->map);

        arena->hint = end;

        return n;
}

static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
{
        unsigned long i;

        for (i = base; i < (base + npages); i++)
                __clear_bit(i, arena->map);
}

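/* Allocate a consistent DMA buffer: get zeroed pages, carve a
 * contiguous range of TSB entries out of the arena, build the
 * physical page list in per-cpu scratch space, and hand it to the
 * hypervisor.  pci_sun4v_iommu_map() may map fewer pages than
 * requested, hence the loop.
 */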
static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        unsigned long devhandle, flags, order, first_page, npages, n;
        void *ret;
        long entry;
        u64 *pglist;
        int cpu;

        size = IO_PAGE_ALIGN(size);
        order = get_order(size);
        if (order >= MAX_ORDER)
                return NULL;

        npages = size >> IO_PAGE_SHIFT;
        if (npages > PGLIST_NENTS)
                return NULL;

        first_page = __get_free_pages(GFP_ATOMIC, order);
        if (first_page == 0UL)
                return NULL;
        memset((char *)first_page, 0, PAGE_SIZE << order);

        pcp = pdev->sysdata;
        devhandle = pcp->pbm->devhandle;
        iommu = pcp->pbm->iommu;

        spin_lock_irqsave(&iommu->lock, flags);
        entry = pci_arena_alloc(&iommu->arena, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(entry < 0L)) {
                free_pages(first_page, order);
                return NULL;
        }

        *dma_addrp = (iommu->page_table_map_base +
                      (entry << IO_PAGE_SHIFT));
        ret = (void *) first_page;
        first_page = __pa(first_page);

        cpu = get_cpu();

        pglist = &__get_cpu_var(iommu_pglists).pglist[0];
        for (n = 0; n < npages; n++)
                pglist[n] = first_page + (n * PAGE_SIZE);

        do {
                unsigned long num;

                num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
                                          npages,
                                          (HV_PCI_MAP_ATTR_READ |
                                           HV_PCI_MAP_ATTR_WRITE),
                                          __pa(pglist));
                entry += num;
                npages -= num;
                pglist += num;
        } while (npages != 0);

        put_cpu();

        return ret;
}

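/* Free a consistent buffer: release the TSB entries to the arena,
 * demap them in the hypervisor, then free the underlying pages.
 */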
static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        unsigned long flags, order, npages, entry, devhandle;

        npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
        pcp = pdev->sysdata;
        iommu = pcp->pbm->iommu;
        devhandle = pcp->pbm->devhandle;
        entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

        spin_lock_irqsave(&iommu->lock, flags);

        pci_arena_free(&iommu->arena, entry, npages);

        do {
                unsigned long num;

                num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                            npages);
                entry += num;
                npages -= num;
        } while (npages != 0);

        spin_unlock_irqrestore(&iommu->lock, flags);

        order = get_order(size);
        if (order < 10)
                free_pages((unsigned long)cpu, order);
}

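/* Map a single streaming buffer for DMA.  Read access is always
 * granted; write access only if the transfer is not
 * PCI_DMA_TODEVICE.
 */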
static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        unsigned long flags, npages, oaddr;
        unsigned long i, base_paddr, devhandle;
        u32 bus_addr, ret;
        unsigned long prot;
        long entry;
        u64 *pglist;
        int cpu;

        pcp = pdev->sysdata;
        iommu = pcp->pbm->iommu;
        devhandle = pcp->pbm->devhandle;

        if (unlikely(direction == PCI_DMA_NONE))
                goto bad;

        oaddr = (unsigned long)ptr;
        npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        if (unlikely(npages > PGLIST_NENTS))
                goto bad;

        spin_lock_irqsave(&iommu->lock, flags);
        entry = pci_arena_alloc(&iommu->arena, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(entry < 0L))
                goto bad;

        bus_addr = (iommu->page_table_map_base +
                    (entry << IO_PAGE_SHIFT));
        ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
        base_paddr = __pa(oaddr & IO_PAGE_MASK);
        prot = HV_PCI_MAP_ATTR_READ;
        if (direction != PCI_DMA_TODEVICE)
                prot |= HV_PCI_MAP_ATTR_WRITE;

        cpu = get_cpu();

        pglist = &__get_cpu_var(iommu_pglists).pglist[0];
        for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE)
                pglist[i] = base_paddr;

        do {
                unsigned long num;

                num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
                                          npages, prot,
                                          __pa(pglist));
                entry += num;
                npages -= num;
                pglist += num;
        } while (npages != 0);

        put_cpu();

        return ret;

bad:
        if (printk_ratelimit())
                WARN_ON(1);
        return PCI_DMA_ERROR_CODE;
}

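/* Tear down a pci_4v_map_single() mapping: recover the TSB entry
 * range from the bus address, release it in the arena, and demap it
 * in the hypervisor.
 */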
static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        unsigned long flags, npages, devhandle;
        long entry;

        if (unlikely(direction == PCI_DMA_NONE)) {
                if (printk_ratelimit())
                        WARN_ON(1);
                return;
        }

        pcp = pdev->sysdata;
        iommu = pcp->pbm->iommu;
        devhandle = pcp->pbm->devhandle;

        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        bus_addr &= IO_PAGE_MASK;

        spin_lock_irqsave(&iommu->lock, flags);

        entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
        pci_arena_free(&iommu->arena, entry, npages);

        do {
                unsigned long num;

                num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                            npages);
                entry += num;
                npages -= num;
        } while (npages != 0);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

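/* Walk the scatterlist and emit one page list entry for every IO
 * page spanned, coalescing elements that share or abut pages, then
 * program the list into the hypervisor starting at 'entry'.
 */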
#define SG_ENT_PHYS_ADDRESS(SG) \
        (__pa(page_address((SG)->page)) + (SG)->offset)

static inline void fill_sg(long entry, unsigned long devhandle,
                           struct scatterlist *sg,
                           int nused, int nelems, unsigned long prot)
{
        struct scatterlist *dma_sg = sg;
        struct scatterlist *sg_end = sg + nelems;
        int i, cpu, pglist_ent;
        u64 *pglist;

        cpu = get_cpu();
        pglist = &__get_cpu_var(iommu_pglists).pglist[0];
        pglist_ent = 0;
        for (i = 0; i < nused; i++) {
                unsigned long pteval = ~0UL;
                u32 dma_npages;

                dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
                              dma_sg->dma_length +
                              ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
                do {
                        unsigned long offset;
                        signed int len;

                        /* If we are here, we know we have at least one
                         * more page to map.  So walk forward until we
                         * hit a page crossing, and begin creating new
                         * mappings from that spot.
                         */
                        for (;;) {
                                unsigned long tmp;

                                tmp = SG_ENT_PHYS_ADDRESS(sg);
                                len = sg->length;
                                if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
                                        pteval = tmp & IO_PAGE_MASK;
                                        offset = tmp & (IO_PAGE_SIZE - 1UL);
                                        break;
                                }
                                if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
                                        pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
                                        offset = 0UL;
                                        len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
                                        break;
                                }
                                sg++;
                        }

                        pteval = (pteval & IOPTE_PAGE);
                        while (len > 0) {
                                pglist[pglist_ent++] = pteval;
                                pteval += IO_PAGE_SIZE;
                                len -= (IO_PAGE_SIZE - offset);
                                offset = 0;
                                dma_npages--;
                        }

                        pteval = (pteval & IOPTE_PAGE) + len;
                        sg++;

                        /* Skip over any tail mappings we've fully mapped,
                         * adjusting pteval along the way.  Stop when we
                         * detect a page crossing event.
                         */
                        while (sg < sg_end &&
                               (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
                               (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
                               ((pteval ^
                                 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
                                pteval += sg->length;
                                sg++;
                        }
                        if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
                                pteval = ~0UL;
                } while (dma_npages != 0);
                dma_sg++;
        }

        BUG_ON(pglist_ent == 0);

        /* Program the accumulated page list into the hypervisor
         * IOMMU with the requested protection attributes.
         */
        do {
                unsigned long num;

                num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
                                          pglist_ent, prot,
                                          __pa(pglist));
                entry += num;
                pglist += num;
                pglist_ent -= num;
        } while (pglist_ent != 0);

        put_cpu();
}

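/* Map a scatterlist.  A single-entry list takes the map_single()
 * fast path.  Otherwise prepare_sg() coalesces the list, a TSB range
 * is allocated, the element DMA addresses are rebased onto it, and
 * fill_sg() programs the hypervisor mappings.
 */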
static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        unsigned long flags, npages, prot, devhandle;
        u32 dma_base;
        struct scatterlist *sgtmp;
        long entry;
        int used;

        /* Fast path single entry scatterlists. */
        if (nelems == 1) {
                sglist->dma_address =
                        pci_4v_map_single(pdev,
                                          (page_address(sglist->page) + sglist->offset),
                                          sglist->length, direction);
                if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
                        return 0;
                sglist->dma_length = sglist->length;
                return 1;
        }

        pcp = pdev->sysdata;
        iommu = pcp->pbm->iommu;
        devhandle = pcp->pbm->devhandle;

        if (unlikely(direction == PCI_DMA_NONE))
                goto bad;

        /* Step 1: Prepare scatter list. */
        npages = prepare_sg(sglist, nelems);
        if (unlikely(npages > PGLIST_NENTS))
                goto bad;

        /* Step 2: Allocate a cluster and context, if necessary. */
        spin_lock_irqsave(&iommu->lock, flags);
        entry = pci_arena_alloc(&iommu->arena, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(entry < 0L))
                goto bad;

        dma_base = iommu->page_table_map_base +
                (entry << IO_PAGE_SHIFT);

        /* Step 3: Normalize DMA addresses. */
        used = nelems;

        sgtmp = sglist;
        while (used && sgtmp->dma_length) {
                sgtmp->dma_address += dma_base;
                sgtmp++;
                used--;
        }
        used = nelems - used;

        /* Step 4: Create the mappings. */
        prot = HV_PCI_MAP_ATTR_READ;
        if (direction != PCI_DMA_TODEVICE)
                prot |= HV_PCI_MAP_ATTR_WRITE;

        fill_sg(entry, devhandle, sglist, used, nelems, prot);

        return used;

bad:
        if (printk_ratelimit())
                WARN_ON(1);
        return 0;
}

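/* Unmap a scatterlist: compute the mapped range from the first and
 * last used entries, then release it in the arena and demap it in
 * the hypervisor.
 */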
static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        unsigned long flags, i, npages, devhandle;
        long entry;
        u32 bus_addr;

        if (unlikely(direction == PCI_DMA_NONE)) {
                if (printk_ratelimit())
                        WARN_ON(1);
        }

        pcp = pdev->sysdata;
        iommu = pcp->pbm->iommu;
        devhandle = pcp->pbm->devhandle;

        bus_addr = sglist->dma_address & IO_PAGE_MASK;

        for (i = 1; i < nelems; i++)
                if (sglist[i].dma_length == 0)
                        break;
        i--;
        npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
                  bus_addr) >> IO_PAGE_SHIFT;

        entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

        spin_lock_irqsave(&iommu->lock, flags);

        pci_arena_free(&iommu->arena, entry, npages);

        do {
                unsigned long num;

                num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                            npages);
                entry += num;
                npages -= num;
        } while (npages != 0);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

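/* DMA transfers are cache-coherent on SUN4V, so the sync operations
 * have nothing to do.
 */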
static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
        /* Nothing to do... */
}

static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
        /* Nothing to do... */
}

struct pci_iommu_ops pci_sun4v_iommu_ops = {
        .alloc_consistent               = pci_4v_alloc_consistent,
        .free_consistent                = pci_4v_free_consistent,
        .map_single                     = pci_4v_map_single,
        .unmap_single                   = pci_4v_unmap_single,
        .map_sg                         = pci_4v_map_sg,
        .unmap_sg                       = pci_4v_unmap_sg,
        .dma_sync_single_for_cpu        = pci_4v_dma_sync_single_for_cpu,
        .dma_sync_sg_for_cpu            = pci_4v_dma_sync_sg_for_cpu,
};

/* SUN4V PCI configuration space accessors. */

static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
                                  int where, int size, u32 *value)
{
        struct pci_pbm_info *pbm = bus_dev->sysdata;
        unsigned long devhandle = pbm->devhandle;
        unsigned int bus = bus_dev->number;
        unsigned int device = PCI_SLOT(devfn);
        unsigned int func = PCI_FUNC(devfn);
        unsigned long ret;

        ret = pci_sun4v_config_get(devhandle,
                                   HV_PCI_DEVICE_BUILD(bus, device, func),
                                   where, size);
        switch (size) {
        case 1:
                *value = ret & 0xff;
                break;
        case 2:
                *value = ret & 0xffff;
                break;
        case 4:
                *value = ret & 0xffffffff;
                break;
        }

        return PCIBIOS_SUCCESSFUL;
}

static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
                                   int where, int size, u32 value)
{
        struct pci_pbm_info *pbm = bus_dev->sysdata;
        unsigned long devhandle = pbm->devhandle;
        unsigned int bus = bus_dev->number;
        unsigned int device = PCI_SLOT(devfn);
        unsigned int func = PCI_FUNC(devfn);
        unsigned long ret;

        ret = pci_sun4v_config_put(devhandle,
                                   HV_PCI_DEVICE_BUILD(bus, device, func),
                                   where, size, value);

        return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops pci_sun4v_ops = {
        .read =         pci_sun4v_read_pci_cfg,
        .write =        pci_sun4v_write_pci_cfg,
};

static void pci_sun4v_scan_bus(struct pci_controller_info *p)
{
        /* XXX Implement me! XXX */
}

static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm,
                                        struct pci_dev *pdev,
                                        unsigned int ino)
{
        /* XXX Implement me! XXX */
        return 0;
}

/* XXX correct? XXX */
static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource)
{
        struct pcidev_cookie *pcp = pdev->sysdata;
        struct pci_pbm_info *pbm = pcp->pbm;
        struct resource *res, *root;
        u32 reg;
        int where, size, is_64bit;

        res = &pdev->resource[resource];
        if (resource < 6) {
                where = PCI_BASE_ADDRESS_0 + (resource * 4);
        } else if (resource == PCI_ROM_RESOURCE) {
                where = pdev->rom_base_reg;
        } else {
                /* Somebody might have asked allocation of a non-standard resource */
                return;
        }

        is_64bit = 0;
        if (res->flags & IORESOURCE_IO)
                root = &pbm->io_space;
        else {
                root = &pbm->mem_space;
                if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
                    == PCI_BASE_ADDRESS_MEM_TYPE_64)
                        is_64bit = 1;
        }

        size = res->end - res->start;
        pci_read_config_dword(pdev, where, &reg);
        reg = ((reg & size) |
               (((u32)(res->start - root->start)) & ~size));
        if (resource == PCI_ROM_RESOURCE) {
                reg |= PCI_ROM_ADDRESS_ENABLE;
                res->flags |= IORESOURCE_ROM_ENABLE;
        }
        pci_write_config_dword(pdev, where, reg);

        /* This knows that the upper 32-bits of the address
         * must be zero.  Our PCI common layer enforces this.
         */
        if (is_64bit)
                pci_write_config_dword(pdev, where + 4, 0);
}

/* XXX correct? XXX */
static void pci_sun4v_resource_adjust(struct pci_dev *pdev,
                                      struct resource *res,
                                      struct resource *root)
{
        res->start += root->start;
        res->end += root->start;
}

/* Use ranges property to determine where PCI MEM, I/O, and Config
 * space are for this PCI bus module.
 */
static void pci_sun4v_determine_mem_io_space(struct pci_pbm_info *pbm)
{
        int i, saw_mem, saw_io;

        saw_mem = saw_io = 0;
        for (i = 0; i < pbm->num_pbm_ranges; i++) {
                struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
                unsigned long a;
                int type;

                type = (pr->child_phys_hi >> 24) & 0x3;
                a = (((unsigned long)pr->parent_phys_hi << 32UL) |
                     ((unsigned long)pr->parent_phys_lo  <<  0UL));

                switch (type) {
                case 1:
                        /* 16-bit IO space, 16MB */
                        pbm->io_space.start = a;
                        pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
                        pbm->io_space.flags = IORESOURCE_IO;
                        saw_io = 1;
                        break;

                case 2:
                        /* 32-bit MEM space, 2GB */
                        pbm->mem_space.start = a;
                        pbm->mem_space.end = a + (0x80000000UL - 1UL);
                        pbm->mem_space.flags = IORESOURCE_MEM;
                        saw_mem = 1;
                        break;

                default:
                        break;
                }
        }

        if (!saw_io || !saw_mem) {
                prom_printf("%s: Fatal error, missing %s PBM range.\n",
                            pbm->name,
                            (!saw_io ? "IO" : "MEM"));
                prom_halt();
        }

        printk("%s: PCI IO[%lx] MEM[%lx]\n",
               pbm->name,
               pbm->io_space.start,
               pbm->mem_space.start);
}

static void pbm_register_toplevel_resources(struct pci_controller_info *p,
                                            struct pci_pbm_info *pbm)
{
        pbm->io_space.name = pbm->mem_space.name = pbm->name;

        request_resource(&ioport_resource, &pbm->io_space);
        request_resource(&iomem_resource, &pbm->mem_space);
        pci_register_legacy_regions(&pbm->io_space,
                                    &pbm->mem_space);
}

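/* The hypervisor TSB can already contain live mappings created by
 * the firmware.  Probe every entry and mark the live ones busy in
 * the arena so they are never handed out again.
 */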
static void probe_existing_entries(struct pci_pbm_info *pbm,
                                   struct pci_iommu *iommu)
{
        struct pci_iommu_arena *arena = &iommu->arena;
        unsigned long i, devhandle;

        devhandle = pbm->devhandle;
        for (i = 0; i < arena->limit; i++) {
                unsigned long ret, io_attrs, ra;

                ret = pci_sun4v_iommu_getmap(devhandle,
                                             HV_PCI_TSBID(0, i),
                                             &io_attrs, &ra);
                if (ret == HV_EOK)
                        __set_bit(i, arena->map);
        }
}

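/* The "virtual-dma" property gives the base address and size of the
 * IOMMU-translated DMA window.  The arena is sized so that there is
 * one TSB entry per IO_PAGE_SIZE page in that window.
 */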
static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
        struct pci_iommu *iommu = pbm->iommu;
        unsigned long num_tsb_entries, sz;
        u32 vdma[2], dma_mask, dma_offset;
        int err, tsbsize;

        err = prom_getproperty(pbm->prom_node, "virtual-dma",
                               (char *)&vdma[0], sizeof(vdma));
        if (err == 0 || err == -1) {
                /* No property, use default values. */
                vdma[0] = 0x80000000;
                vdma[1] = 0x80000000;
        }

        dma_mask = vdma[0];
        switch (vdma[1]) {
                case 0x20000000:
                        dma_mask |= 0x1fffffff;
                        tsbsize = 64;
                        break;

                case 0x40000000:
                        dma_mask |= 0x3fffffff;
                        tsbsize = 128;
                        break;

                case 0x80000000:
                        dma_mask |= 0x7fffffff;
                        tsbsize = 256;
                        break;

                default:
                        prom_printf("PCI-SUN4V: strange virtual-dma size.\n");
                        prom_halt();
        }

        /* tsbsize is in units of 8K bytes; convert to bytes so that
         * one 8-byte iopte exists for each IO_PAGE_SIZE page of the
         * DMA window.
         */
        tsbsize *= (8 * 1024);

        num_tsb_entries = tsbsize / sizeof(iopte_t);

        dma_offset = vdma[0];

        /* Setup initial software IOMMU state. */
        spin_lock_init(&iommu->lock);
        iommu->ctx_lowest_free = 1;
        iommu->page_table_map_base = dma_offset;
        iommu->dma_addr_mask = dma_mask;

        /* Allocate and initialize the free area map.  */
        sz = num_tsb_entries / 8;
        sz = (sz + 7UL) & ~7UL;
        iommu->arena.map = kmalloc(sz, GFP_KERNEL);
        if (!iommu->arena.map) {
                prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
                prom_halt();
        }
        memset(iommu->arena.map, 0, sz);
        iommu->arena.limit = num_tsb_entries;

        probe_existing_entries(pbm, iommu);
}

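/* Initialize one PBM from its OBP node.  Bit 6 of the devhandle
 * selects which PBM of the pair this is.  The ranges, interrupt-map,
 * and bus-range properties are parsed, then the IOMMU is set up.
 */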
static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node, unsigned int devhandle)
{
        struct pci_pbm_info *pbm;
        unsigned int busrange[2];
        int err, i;

        if (devhandle & 0x40)
                pbm = &p->pbm_B;
        else
                pbm = &p->pbm_A;

        pbm->parent = p;
        pbm->prom_node = prom_node;
        pbm->pci_first_slot = 1;

        pbm->devhandle = devhandle;

        sprintf(pbm->name, "SUN4V-PCI%d PBM%c",
                p->index, (pbm == &p->pbm_A ? 'A' : 'B'));

        printk("%s: devhandle[%x]\n", pbm->name, pbm->devhandle);

        prom_getstring(prom_node, "name",
                       pbm->prom_name, sizeof(pbm->prom_name));

        err = prom_getproperty(prom_node, "ranges",
                               (char *) pbm->pbm_ranges,
                               sizeof(pbm->pbm_ranges));
        if (err == 0 || err == -1) {
                prom_printf("%s: Fatal error, no ranges property.\n",
                            pbm->name);
                prom_halt();
        }

        pbm->num_pbm_ranges =
                (err / sizeof(struct linux_prom_pci_ranges));

        /* Mask out the top 4 bits of the ranges, leaving the real
         * physical address.
         */
        for (i = 0; i < pbm->num_pbm_ranges; i++)
                pbm->pbm_ranges[i].parent_phys_hi &= 0x0fffffff;

        pci_sun4v_determine_mem_io_space(pbm);
        pbm_register_toplevel_resources(p, pbm);

        err = prom_getproperty(prom_node, "interrupt-map",
                               (char *)pbm->pbm_intmap,
                               sizeof(pbm->pbm_intmap));
        if (err != -1) {
                pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap));
                err = prom_getproperty(prom_node, "interrupt-map-mask",
                                       (char *)&pbm->pbm_intmask,
                                       sizeof(pbm->pbm_intmask));
                if (err == -1) {
                        prom_printf("%s: Fatal error, no "
                                    "interrupt-map-mask.\n", pbm->name);
                        prom_halt();
                }
        } else {
                pbm->num_pbm_intmap = 0;
                memset(&pbm->pbm_intmask, 0, sizeof(pbm->pbm_intmask));
        }

        err = prom_getproperty(prom_node, "bus-range",
                               (char *)&busrange[0],
                               sizeof(busrange));
        if (err == 0 || err == -1) {
                prom_printf("%s: Fatal error, no bus-range.\n", pbm->name);
                prom_halt();
        }
        pbm->pci_first_busno = busrange[0];
        pbm->pci_last_busno = busrange[1];

        pci_sun4v_iommu_init(pbm);
}

void sun4v_pci_init(int node, char *model_name)
{
        struct pci_controller_info *p;
        struct pci_iommu *iommu;
        struct linux_prom64_registers regs;
        unsigned int devhandle;

        prom_getproperty(node, "reg", (char *)&regs, sizeof(regs));
        devhandle = (regs.phys_addr >> 32UL) & 0x0fffffff;

        /* If this node's sibling PBM is already known, attach to the
         * existing controller instead of creating a new one.
         */
        for (p = pci_controller_root; p; p = p->next) {
                struct pci_pbm_info *pbm;

                if (p->pbm_A.prom_node && p->pbm_B.prom_node)
                        continue;

                pbm = (p->pbm_A.prom_node ?
                       &p->pbm_A :
                       &p->pbm_B);

                if (pbm->devhandle == (devhandle ^ 0x40)) {
                        pci_sun4v_pbm_init(p, node, devhandle);
                        return;
                }
        }

        p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
        if (!p) {
                prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
                prom_halt();
        }
        memset(p, 0, sizeof(*p));

        iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
        if (!iommu) {
                prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
                prom_halt();
        }
        memset(iommu, 0, sizeof(*iommu));
        p->pbm_A.iommu = iommu;

        iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
        if (!iommu) {
                prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
                prom_halt();
        }
        memset(iommu, 0, sizeof(*iommu));
        p->pbm_B.iommu = iommu;

        p->next = pci_controller_root;
        pci_controller_root = p;

        p->index = pci_num_controllers++;
        p->pbms_same_domain = 0;

        p->scan_bus = pci_sun4v_scan_bus;
        p->irq_build = pci_sun4v_irq_build;
        p->base_address_update = pci_sun4v_base_address_update;
        p->resource_adjust = pci_sun4v_resource_adjust;
        p->pci_ops = &pci_sun4v_ops;

        /* Like PSYCHO and SCHIZO we have a 2GB aligned area
         * for memory space.
         */
        pci_memspace_mask = 0x7fffffffUL;

        pci_sun4v_pbm_init(p, node, devhandle);

        prom_printf("sun4v_pci_init: Implement me.\n");
        prom_halt();
}