/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>

#include <asm/pbm.h>
#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include <asm/pstate.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

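/* Each CPU gets one page of scratch space used to build the list
 * of IO page physical addresses handed to the hypervisor IOMMU
 * map call.  This caps any single mapping request at PGLIST_NENTS
 * IO pages, which the allocation paths below check explicitly.
 */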
struct sun4v_pglist {
	u64	*pglist;
};

static DEFINE_PER_CPU(struct sun4v_pglist, iommu_pglists);

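/* Simple bitmap allocator for IOMMU TSB entries.  The search
 * starts at the rotating hint and wraps around once (two passes)
 * before giving up; the hint is advanced past each successful
 * allocation.  Callers hold iommu->lock around both the alloc
 * and free routines.
 */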
static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages)
{
	unsigned long n, i, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = find_next_zero_bit(arena->map, limit, start);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			limit = start;
			start = 0;
			pass++;
			goto again;
		} else {
			/* Scanned the whole thing, give up. */
			return -1;
		}
	}

	for (i = n; i < end; i++) {
		if (test_bit(i, arena->map)) {
			start = i + 1;
			goto again;
		}
	}

	for (i = n; i < end; i++)
		__set_bit(i, arena->map);

	arena->hint = end;

	return n;
}

static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
{
	unsigned long i;

	for (i = base; i < (base + npages); i++)
		__clear_bit(i, arena->map);
}

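/* Consistent (coherent) DMA allocation: grab physically contiguous
 * pages, reserve a range of TSB entries, then hand the hypervisor
 * a list of page physical addresses to map read/write.  The map
 * hypercall may complete only part of the request, so we loop on
 * the count it returns.
 */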
static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, order, first_page, npages, n;
	void *ret;
	long entry;
	u64 *pglist;
	u32 devhandle;
	int cpu;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= MAX_ORDER)
		return NULL;

	npages = size >> IO_PAGE_SHIFT;
	if (npages > PGLIST_NENTS)
		return NULL;

	first_page = __get_free_pages(GFP_ATOMIC, order);
	if (first_page == 0UL)
		return NULL;
	memset((char *)first_page, 0, PAGE_SIZE << order);

	pcp = pdev->sysdata;
	devhandle = pcp->pbm->devhandle;
	iommu = pcp->pbm->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L)) {
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	cpu = get_cpu();

	pglist = __get_cpu_var(iommu_pglists).pglist;
	for (n = 0; n < npages; n++)
		pglist[n] = first_page + (n * PAGE_SIZE);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages,
					  (HV_PCI_MAP_ATTR_READ |
					   HV_PCI_MAP_ATTR_WRITE),
					  __pa(pglist));
		entry += num;
		npages -= num;
		pglist += num;
	} while (npages != 0);

	put_cpu();

	return ret;
}

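/* Tear down a consistent allocation: release the TSB entries,
 * demap them via the hypervisor (again looping on partial
 * completion), then free the backing pages.
 */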
static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

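/* Map a single streaming buffer.  Write permission is granted
 * unless the direction is PCI_DMA_TODEVICE, matching the
 * attribute handling in the scatterlist path below.
 */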
static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 devhandle, bus_addr, ret;
	unsigned long prot;
	long entry;
	u64 *pglist;
	int cpu;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	if (unlikely(npages > PGLIST_NENTS))
		goto bad;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	cpu = get_cpu();

	pglist = __get_cpu_var(iommu_pglists).pglist;
	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE)
		pglist[i] = base_paddr;

	do {
		unsigned long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot,
					  __pa(pglist));
		entry += num;
		npages -= num;
		pglist += num;
	} while (npages != 0);

	put_cpu();

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return PCI_DMA_ERROR_CODE;
}

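/* Inverse of pci_4v_map_single(): recompute the IO page count
 * from the bus address and size, free the arena range, and demap
 * the entries through the hypervisor.
 */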
static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

#define SG_ENT_PHYS_ADDRESS(SG) \
	(__pa(page_address((SG)->page)) + (SG)->offset)

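/* Program the IOMMU for a prepared scatterlist.  prepare_sg()
 * (shared with the sun4u code) is expected to have packed the
 * cluster dma_address/dma_length values into the first 'nused'
 * entries; here we walk the original 'nelems' entries, coalescing
 * pages across segment boundaries into a flat list of IO-page
 * sized ptevals, and hand that list to the hypervisor map call.
 */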
static inline void fill_sg(long entry, u32 devhandle,
			   struct scatterlist *sg,
			   int nused, int nelems, unsigned long prot)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg + nelems;
	int i, cpu, pglist_ent;
	u64 *pglist;

	cpu = get_cpu();
	pglist = __get_cpu_var(iommu_pglists).pglist;
	pglist_ent = 0;
	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg++;
			}

			pteval = (pteval & IOPTE_PAGE);
			while (len > 0) {
				pglist[pglist_ent++] = pteval;
				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg++;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (sg < sg_end &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg++;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg++;
	}

	BUG_ON(pglist_ent == 0);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  pglist_ent, prot,
					  __pa(pglist));
		entry += num;
		pglist_ent -= num;
		pglist += num;
	} while (pglist_ent != 0);

	put_cpu();
}

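/* Map a scatterlist: single-entry lists take the map_single fast
 * path; otherwise prepare_sg() clusters the entries, a TSB range
 * is reserved, the cluster DMA addresses are rebased onto it, and
 * fill_sg() programs the actual translations.
 */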
static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages, prot;
	u32 devhandle, dma_base;
	struct scatterlist *sgtmp;
	long entry;
	int used;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dma_address =
			pci_4v_map_single(pdev,
					  (page_address(sglist->page) + sglist->offset),
					  sglist->length, direction);
		if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
			return 0;
		sglist->dma_length = sglist->length;
		return 1;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	/* Step 1: Prepare scatter list. */
	npages = prepare_sg(sglist, nelems);
	if (unlikely(npages > PGLIST_NENTS))
		goto bad;

	/* Step 2: Allocate a cluster and context, if necessary. */
	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	dma_base = iommu->page_table_map_base +
		(entry << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses. */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nelems - used;

	/* Step 4: Create the mappings. */
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	fill_sg(entry, devhandle, sglist, used, nelems, prot);

	return used;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return 0;
}

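/* Unmap a scatterlist.  The total IO page count is recovered by
 * walking the dma_length fields up to the first zero-length
 * (terminating) entry.
 */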
static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, i, npages;
	long entry;
	u32 devhandle, bus_addr;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	for (i = 1; i < nelems; i++)
		if (sglist[i].dma_length == 0)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
		  bus_addr) >> IO_PAGE_SHIFT;

	entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	/* Nothing to do... */
}

static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	/* Nothing to do... */
}

struct pci_iommu_ops pci_sun4v_iommu_ops = {
	.alloc_consistent		= pci_4v_alloc_consistent,
	.free_consistent		= pci_4v_free_consistent,
	.map_single			= pci_4v_map_single,
	.unmap_single			= pci_4v_unmap_single,
	.map_sg				= pci_4v_map_sg,
	.unmap_sg			= pci_4v_unmap_sg,
	.dma_sync_single_for_cpu	= pci_4v_dma_sync_single_for_cpu,
	.dma_sync_sg_for_cpu		= pci_4v_dma_sync_sg_for_cpu,
};

/* SUN4V PCI configuration space accessors. */

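/* All config space accesses go through the hypervisor config_get
 * and config_put services, keyed by the PBM's devhandle plus a
 * bus/device/function triplet built with HV_PCI_DEVICE_BUILD().
 */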
static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus)
{
	if (bus < pbm->pci_first_busno ||
	    bus > pbm->pci_last_busno)
		return 1;
	return 0;
}

static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				  int where, int size, u32 *value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	u32 devhandle = pbm->devhandle;
	unsigned int bus = bus_dev->number;
	unsigned int device = PCI_SLOT(devfn);
	unsigned int func = PCI_FUNC(devfn);
	unsigned long ret;

	if (pci_sun4v_out_of_range(pbm, bus)) {
		ret = ~0UL;
	} else {
		ret = pci_sun4v_config_get(devhandle,
				HV_PCI_DEVICE_BUILD(bus, device, func),
				where, size);
#if 0
		printk("read_pci_cfg: devh[%x] device[%08x] where[%x] sz[%d] "
		       "== [%016lx]\n",
		       devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
		       where, size, ret);
#endif
	}
	switch (size) {
	case 1:
		*value = ret & 0xff;
		break;
	case 2:
		*value = ret & 0xffff;
		break;
	case 4:
		*value = ret & 0xffffffff;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}

static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				   int where, int size, u32 value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	u32 devhandle = pbm->devhandle;
	unsigned int bus = bus_dev->number;
	unsigned int device = PCI_SLOT(devfn);
	unsigned int func = PCI_FUNC(devfn);
	unsigned long ret;

	if (pci_sun4v_out_of_range(pbm, bus)) {
		/* Do nothing. */
	} else {
		ret = pci_sun4v_config_put(devhandle,
				HV_PCI_DEVICE_BUILD(bus, device, func),
				where, size, value);
#if 0
		printk("write_pci_cfg: devh[%x] device[%08x] where[%x] sz[%d] "
		       "val[%08x] == [%016lx]\n",
		       devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
		       where, size, value, ret);
#endif
	}
	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops pci_sun4v_ops = {
	.read =		pci_sun4v_read_pci_cfg,
	.write =	pci_sun4v_write_pci_cfg,
};

static void pbm_scan_bus(struct pci_controller_info *p,
			 struct pci_pbm_info *pbm)
{
	struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL);

	if (!cookie) {
		prom_printf("%s: Critical allocation failure.\n", pbm->name);
		prom_halt();
	}

	/* All we care about is the PBM. */
	memset(cookie, 0, sizeof(*cookie));
	cookie->pbm = pbm;

	pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno,
				    p->pci_ops,
				    pbm);
#if 0
	pci_fixup_host_bridge_self(pbm->pci_bus);
	pbm->pci_bus->self->sysdata = cookie;
#endif

	pci_fill_in_pbm_cookies(pbm->pci_bus, pbm,
				prom_getchild(pbm->prom_node));
	pci_record_assignments(pbm, pbm->pci_bus);
	pci_assign_unassigned(pbm, pbm->pci_bus);
	pci_fixup_irq(pbm, pbm->pci_bus);
	pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
	pci_setup_busmastering(pbm, pbm->pci_bus);
}

static void pci_sun4v_scan_bus(struct pci_controller_info *p)
{
	if (p->pbm_A.prom_node) {
		p->pbm_A.is_66mhz_capable =
			prom_getbool(p->pbm_A.prom_node, "66mhz-capable");

		pbm_scan_bus(p, &p->pbm_A);
	}
	if (p->pbm_B.prom_node) {
		p->pbm_B.is_66mhz_capable =
			prom_getbool(p->pbm_B.prom_node, "66mhz-capable");

		pbm_scan_bus(p, &p->pbm_B);
	}

	/* XXX register error interrupt handlers XXX */
}

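/* Choose an interrupt priority level based on the device's PCI
 * base class, then build the sun4v IRQ from the devhandle and
 * device INO.
 */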
static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm,
					struct pci_dev *pdev,
					unsigned int devino)
{
	u32 devhandle = pbm->devhandle;
	int pil;

	pil = 4;
	if (pdev) {
		switch ((pdev->class >> 16) & 0xff) {
		case PCI_BASE_CLASS_STORAGE:
			pil = 4;
			break;

		case PCI_BASE_CLASS_NETWORK:
			pil = 6;
			break;

		case PCI_BASE_CLASS_DISPLAY:
			pil = 9;
			break;

		case PCI_BASE_CLASS_MULTIMEDIA:
		case PCI_BASE_CLASS_MEMORY:
		case PCI_BASE_CLASS_BRIDGE:
		case PCI_BASE_CLASS_SERIAL:
			pil = 10;
			break;

		default:
			pil = 4;
			break;
		}
	}
	BUG_ON(PIL_RESERVED(pil));

	return sun4v_build_irq(devhandle, devino, pil, IBF_PCI);
}

static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	struct pci_pbm_info *pbm = pcp->pbm;
	struct resource *res, *root;
	u32 reg;
	int where, size, is_64bit;

	res = &pdev->resource[resource];
	if (resource < 6) {
		where = PCI_BASE_ADDRESS_0 + (resource * 4);
	} else if (resource == PCI_ROM_RESOURCE) {
		where = pdev->rom_base_reg;
	} else {
		/* Somebody might have asked allocation of a non-standard resource */
		return;
	}

	/* XXX 64-bit MEM handling is not %100 correct... XXX */
	is_64bit = 0;
	if (res->flags & IORESOURCE_IO)
		root = &pbm->io_space;
	else {
		root = &pbm->mem_space;
		if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
		    == PCI_BASE_ADDRESS_MEM_TYPE_64)
			is_64bit = 1;
	}

	size = res->end - res->start;
	pci_read_config_dword(pdev, where, &reg);
	reg = ((reg & size) |
	       (((u32)(res->start - root->start)) & ~size));
	if (resource == PCI_ROM_RESOURCE) {
		reg |= PCI_ROM_ADDRESS_ENABLE;
		res->flags |= IORESOURCE_ROM_ENABLE;
	}
	pci_write_config_dword(pdev, where, reg);

	/* This knows that the upper 32-bits of the address
	 * must be zero.  Our PCI common layer enforces this.
	 */
	if (is_64bit)
		pci_write_config_dword(pdev, where + 4, 0);
}

static void pci_sun4v_resource_adjust(struct pci_dev *pdev,
				      struct resource *res,
				      struct resource *root)
{
	res->start += root->start;
	res->end += root->start;
}

/* Use ranges property to determine where PCI MEM, I/O, and Config
 * space are for this PCI bus module.
 */
static void pci_sun4v_determine_mem_io_space(struct pci_pbm_info *pbm)
{
	int i, saw_mem, saw_io;

	saw_mem = saw_io = 0;
	for (i = 0; i < pbm->num_pbm_ranges; i++) {
		struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
		unsigned long a;
		int type;

		type = (pr->child_phys_hi >> 24) & 0x3;
		a = (((unsigned long)pr->parent_phys_hi << 32UL) |
		     ((unsigned long)pr->parent_phys_lo  <<  0UL));

		switch (type) {
		case 1:
			/* 16-bit IO space, 16MB */
			pbm->io_space.start = a;
			pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
			pbm->io_space.flags = IORESOURCE_IO;
			saw_io = 1;
			break;

		case 2:
			/* 32-bit MEM space, 2GB */
			pbm->mem_space.start = a;
			pbm->mem_space.end = a + (0x80000000UL - 1UL);
			pbm->mem_space.flags = IORESOURCE_MEM;
			saw_mem = 1;
			break;

		case 3:
			/* XXX 64-bit MEM handling XXX */

		default:
			break;
		}
	}

	if (!saw_io || !saw_mem) {
		prom_printf("%s: Fatal error, missing %s PBM range.\n",
			    pbm->name,
			    (!saw_io ? "IO" : "MEM"));
		prom_halt();
	}

	printk("%s: PCI IO[%lx] MEM[%lx]\n",
	       pbm->name,
	       pbm->io_space.start,
	       pbm->mem_space.start);
}

static void pbm_register_toplevel_resources(struct pci_controller_info *p,
					    struct pci_pbm_info *pbm)
{
	pbm->io_space.name = pbm->mem_space.name = pbm->name;

	request_resource(&ioport_resource, &pbm->io_space);
	request_resource(&iomem_resource, &pbm->mem_space);
	pci_register_legacy_regions(&pbm->io_space,
				    &pbm->mem_space);
}

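/* Scan the TSB for translations that are already valid (for
 * example, mappings left in place by the firmware) and mark those
 * entries used so the arena allocator never hands them out.
 */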
static void probe_existing_entries(struct pci_pbm_info *pbm,
				   struct pci_iommu *iommu)
{
	struct pci_iommu_arena *arena = &iommu->arena;
	unsigned long i;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK)
			__set_bit(i, arena->map);
	}
}

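/* The "virtual-dma" property gives the DVMA base and size for
 * this PBM; from it we derive the DMA address mask and TSB size,
 * then build the free-area bitmap and account for any pre-existing
 * translations.
 */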
static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	struct pci_iommu *iommu = pbm->iommu;
	unsigned long num_tsb_entries, sz;
	u32 vdma[2], dma_mask, dma_offset;
	int err, tsbsize;

	err = prom_getproperty(pbm->prom_node, "virtual-dma",
			       (char *)&vdma[0], sizeof(vdma));
	if (err == 0 || err == -1) {
		/* No property, use default values. */
		vdma[0] = 0x80000000;
		vdma[1] = 0x80000000;
	}

	dma_mask = vdma[0];
	switch (vdma[1]) {
		case 0x20000000:
			dma_mask |= 0x1fffffff;
			tsbsize = 64;
			break;

		case 0x40000000:
			dma_mask |= 0x3fffffff;
			tsbsize = 128;
			break;

		case 0x80000000:
			dma_mask |= 0x7fffffff;
			tsbsize = 128;
			break;

		default:
			prom_printf("PCI-SUN4V: strange virtual-dma size.\n");
			prom_halt();
	}

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map.  */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kmalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
		prom_halt();
	}
	memset(iommu->arena.map, 0, sz);
	iommu->arena.limit = num_tsb_entries;

	probe_existing_entries(pbm, iommu);
}

/* Don't get this from the root nexus, get it from the "pci@0" node below.  */
static void pci_sun4v_get_bus_range(struct pci_pbm_info *pbm)
{
	unsigned int busrange[2];
	int prom_node = pbm->prom_node;
	int err;

	prom_node = prom_getchild(prom_node);
	if (prom_node == 0) {
		prom_printf("%s: Fatal error, no child OBP node.\n", pbm->name);
		prom_halt();
	}

	err = prom_getproperty(prom_node, "bus-range",
			       (char *)&busrange[0],
			       sizeof(busrange));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no bus-range.\n", pbm->name);
		prom_halt();
	}

	pbm->pci_first_busno = busrange[0];
	pbm->pci_last_busno = busrange[1];
}

static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node, u32 devhandle)
{
	struct pci_pbm_info *pbm;
	int err, i;

	if (devhandle & 0x40)
		pbm = &p->pbm_B;
	else
		pbm = &p->pbm_A;

	pbm->parent = p;
	pbm->prom_node = prom_node;
	pbm->pci_first_slot = 1;

	pbm->devhandle = devhandle;

	sprintf(pbm->name, "SUN4V-PCI%d PBM%c",
		p->index, (pbm == &p->pbm_A ? 'A' : 'B'));

	printk("%s: devhandle[%x]\n", pbm->name, pbm->devhandle);

	prom_getstring(prom_node, "name",
		       pbm->prom_name, sizeof(pbm->prom_name));

	err = prom_getproperty(prom_node, "ranges",
			       (char *) pbm->pbm_ranges,
			       sizeof(pbm->pbm_ranges));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no ranges property.\n",
			    pbm->name);
		prom_halt();
	}

	pbm->num_pbm_ranges =
		(err / sizeof(struct linux_prom_pci_ranges));

	/* Mask out the top 4 bits of the parent ranges, leaving the
	 * real physical address.
	 */
	for (i = 0; i < pbm->num_pbm_ranges; i++)
		pbm->pbm_ranges[i].parent_phys_hi &= 0x0fffffff;

	pci_sun4v_determine_mem_io_space(pbm);
	pbm_register_toplevel_resources(p, pbm);

	err = prom_getproperty(prom_node, "interrupt-map",
			       (char *)pbm->pbm_intmap,
			       sizeof(pbm->pbm_intmap));
	if (err != -1) {
		pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap));
		err = prom_getproperty(prom_node, "interrupt-map-mask",
				       (char *)&pbm->pbm_intmask,
				       sizeof(pbm->pbm_intmask));
		if (err == -1) {
			prom_printf("%s: Fatal error, no "
				    "interrupt-map-mask.\n", pbm->name);
			prom_halt();
		}
	} else {
		pbm->num_pbm_intmap = 0;
		memset(&pbm->pbm_intmask, 0, sizeof(pbm->pbm_intmask));
	}

	pci_sun4v_get_bus_range(pbm);
	pci_sun4v_iommu_init(pbm);
}

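/* The devhandle lives in the upper bits of the first "reg"
 * property entry.  The two PBMs of one controller differ only in
 * bit 0x40 of the devhandle, which is how a new OBP node is
 * matched up with a partially initialized controller below.
 */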
void sun4v_pci_init(int node, char *model_name)
{
	struct pci_controller_info *p;
	struct pci_iommu *iommu;
	struct linux_prom64_registers regs;
	u32 devhandle;
	int i;

	prom_getproperty(node, "reg", (char *)&regs, sizeof(regs));
	devhandle = (regs.phys_addr >> 32UL) & 0x0fffffff;

	for (p = pci_controller_root; p; p = p->next) {
		struct pci_pbm_info *pbm;

		if (p->pbm_A.prom_node && p->pbm_B.prom_node)
			continue;

		pbm = (p->pbm_A.prom_node ?
		       &p->pbm_A :
		       &p->pbm_B);

		if (pbm->devhandle == (devhandle ^ 0x40)) {
			pci_sun4v_pbm_init(p, node, devhandle);
			return;
		}
	}

	for (i = 0; i < NR_CPUS; i++) {
		unsigned long page = get_zeroed_page(GFP_ATOMIC);

		if (!page)
			goto fatal_memory_error;

		per_cpu(iommu_pglists, i).pglist = (u64 *) page;
	}

	p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
	if (!p)
		goto fatal_memory_error;

	memset(p, 0, sizeof(*p));

	iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	memset(iommu, 0, sizeof(*iommu));
	p->pbm_A.iommu = iommu;

	iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	memset(iommu, 0, sizeof(*iommu));
	p->pbm_B.iommu = iommu;

	p->next = pci_controller_root;
	pci_controller_root = p;

	p->index = pci_num_controllers++;
	p->pbms_same_domain = 0;

	p->scan_bus = pci_sun4v_scan_bus;
	p->irq_build = pci_sun4v_irq_build;
	p->base_address_update = pci_sun4v_base_address_update;
	p->resource_adjust = pci_sun4v_resource_adjust;
	p->pci_ops = &pci_sun4v_ops;

	/* Like PSYCHO and SCHIZO we have a 2GB aligned area
	 * for memory space.
	 */
	pci_memspace_mask = 0x7fffffffUL;

	pci_sun4v_pbm_init(p, node, devhandle);
	return;

fatal_memory_error:
	prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
	prom_halt();
}