/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/log2.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include <asm/pstate.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"
static unsigned long vpci_major = 1;
static unsigned long vpci_minor = 1;
#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

struct iommu_batch {
	struct device	*dev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
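
/* A sketch of the intended calling pattern (interrupts disabled
 * throughout): iommu_batch_start() latches the device, protections and
 * starting IOTSB index; iommu_batch_add() queues one physical page per
 * call, flushing automatically when the per-cpu page list fills; and
 * iommu_batch_end() flushes whatever remains.  With sparc64's 8K pages,
 * PGLIST_NENTS works out to 1024 mappings per hypervisor call.
 */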

/* Interrupts must be disabled.  */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	p->dev		= dev;
	p->prot		= prot;
	p->entry	= entry;
	p->npages	= 0;
}

/* Interrupts must be disabled.  */
static long iommu_batch_flush(struct iommu_batch *p)
{
	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	while (npages != 0) {
		long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk("iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08lx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		/* The hypervisor may map fewer entries than requested,
		 * so advance past the accepted portion and retry the rest.
		 */
		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_add(u64 phys_page)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return iommu_batch_flush(p);

	return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_end(void)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return iommu_batch_flush(p);
}

static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	struct iommu *iommu;
	unsigned long flags, order, first_page, npages, n;
	void *ret;
	long entry;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	first_page = __get_free_pages(gfp, order);
	if (unlikely(first_page == 0UL))
		return NULL;

	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		goto range_alloc_fail;

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	iommu_batch_start(dev,
			  (HV_PCI_MAP_ATTR_READ |
			   HV_PCI_MAP_ATTR_WRITE),
			  entry);

	for (n = 0; n < npages; n++) {
		long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	iommu_range_free(iommu, *dma_addrp, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

range_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}
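
/* For illustration only: a dma_alloc_coherent(dev, 16K, &handle, gfp)
 * call from a driver arrives here with size already a multiple of the
 * 8K IO page, allocates two IOTSB entries, and hands back *dma_addrp
 * pointing at the first of the two IO pages in the DMA window.
 */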

static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
				 dma_addr_t dvma)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, dvma, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	free_pages((unsigned long)cpu, order);
}

static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
				    enum dma_data_direction direction)
{
	struct iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	iommu = dev->archdata.iommu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	iommu_range_free(iommu, bus_addr, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return DMA_ERROR_CODE;
}

static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
				size_t sz, enum dma_data_direction direction)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, bus_addr, npages);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction)
{
	unsigned long flags, npages, i, prot;
	u32 dma_base, orig_dma_base;
	struct scatterlist *sg;
	struct iommu *iommu;
	long entry, err;

	/* Fast path single entry scatterlists.  */
	if (nelems == 1) {
		sglist->dma_address =
			dma_4v_map_single(dev, sg_virt(sglist),
					  sglist->length, direction);
		if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
			return 0;
		sglist->dma_length = sglist->length;
		return 1;
	}

	iommu = dev->archdata.iommu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	npages = calc_npages(sglist, nelems);

	spin_lock_irqsave(&iommu->lock, flags);
	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		goto bad;

	orig_dma_base = dma_base = iommu->page_table_map_base +
		(entry << IO_PAGE_SHIFT);

	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for_each_sg(sglist, sg, nelems, i) {
		unsigned long paddr = SG_ENT_PHYS_ADDRESS(sg);
		unsigned long slen = sg->length;
		unsigned long this_npages;

		this_npages = iommu_num_pages(paddr, slen);

		sg->dma_address = dma_base | (paddr & ~IO_PAGE_MASK);
		sg->dma_length = slen;

		paddr &= IO_PAGE_MASK;
		while (this_npages--) {
			err = iommu_batch_add(paddr);
			if (unlikely(err < 0L)) {
				local_irq_restore(flags);
				goto iommu_map_failed;
			}

			paddr += IO_PAGE_SIZE;
			dma_base += IO_PAGE_SIZE;
		}
	}

	err = iommu_batch_end();

	local_irq_restore(flags);

	if (unlikely(err < 0L))
		goto iommu_map_failed;

	return nelems;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return 0;

iommu_map_failed:
	spin_lock_irqsave(&iommu->lock, flags);
	iommu_range_free(iommu, orig_dma_base, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
	unsigned long flags, npages;
	struct pci_pbm_info *pbm;
	u32 devhandle, bus_addr;
	struct iommu *iommu;
	long entry;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	npages = calc_npages(sglist, nelems);

	entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, bus_addr, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4v_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	/* Nothing to do... */
}

static void dma_4v_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	/* Nothing to do... */
}
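
/* The two sync_*_for_cpu hooks above are intentionally empty: DMA on
 * sun4v is cache-coherent, so there is nothing to flush or invalidate;
 * they exist only to satisfy the dma_ops interface.
 */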

const struct dma_ops sun4v_dma_ops = {
	.alloc_coherent			= dma_4v_alloc_coherent,
	.free_coherent			= dma_4v_free_coherent,
	.map_single			= dma_4v_map_single,
	.unmap_single			= dma_4v_unmap_single,
	.map_sg				= dma_4v_map_sg,
	.unmap_sg			= dma_4v_unmap_sg,
	.sync_single_for_cpu		= dma_4v_sync_single_for_cpu,
	.sync_sg_for_cpu		= dma_4v_sync_sg_for_cpu,
};
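
/* sun4v_pci_init() below installs this table as the global dma_ops, so
 * generic calls such as dma_map_single() from drivers are routed into
 * the dma_4v_* routines above.
 */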

static void __init pci_sun4v_scan_bus(struct pci_pbm_info *pbm)
{
	struct property *prop;
	struct device_node *dp;

	dp = pbm->prom_node;
	prop = of_find_property(dp, "66mhz-capable", NULL);
	pbm->is_66mhz_capable = (prop != NULL);
	pbm->pci_bus = pci_scan_one_pbm(pbm);

	/* XXX register error interrupt handlers XXX */
}

static unsigned long __init probe_existing_entries(struct pci_pbm_info *pbm,
						   struct iommu *iommu)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long i, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK) {
			if (page_in_phys_avail(ra)) {
				pci_sun4v_iommu_demap(devhandle,
						      HV_PCI_TSBID(0, i), 1);
			} else {
				cnt++;
				__set_bit(i, arena->map);
			}
		}
	}

	return cnt;
}

static void __init pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	struct iommu *iommu = pbm->iommu;
	struct property *prop;
	unsigned long num_tsb_entries, sz, tsbsize;
	u32 vdma[2], dma_mask, dma_offset;

	prop = of_find_property(pbm->prom_node, "virtual-dma", NULL);
	if (prop) {
		u32 *val = prop->value;

		vdma[0] = val[0];
		vdma[1] = val[1];
	} else {
		/* No property, use default values. */
		vdma[0] = 0x80000000;
		vdma[1] = 0x80000000;
	}

	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
		prom_printf("PCI-SUN4V: strange virtual-dma[%08x:%08x].\n",
			    vdma[0], vdma[1]);
		prom_halt();
	}

	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
	tsbsize = num_tsb_entries * sizeof(iopte_t);

	dma_offset = vdma[0];
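
	/* For example, with the default 2GB window above: dma_mask ends
	 * up 0x7fffffff, num_tsb_entries is 0x80000000 / 8K = 256K, and
	 * tsbsize is 256K * sizeof(iopte_t) = 2MB.
	 */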

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map.  */
	sz = (num_tsb_entries + 7) / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
		prom_halt();
	}
	iommu->arena.limit = num_tsb_entries;

	sz = probe_existing_entries(pbm, iommu);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);
}

#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;	/* INTX only */
	u64		reserved1;
	u64		stick;
	u64		req_id;		/* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the message code and message routing code where:
	 *	bits 39:32 is the bus/device/fn of the msg target-id
	 *	bits 18:16 is the message routing code
	 *	bits 7:0 is the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};
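
/* For illustration only (nothing in this file currently decodes req_id):
 * the requester's bus/device/function would be extracted as
 *
 *	bus = (ep->req_id & MSIQ_REQID_BUS_MASK) >> MSIQ_REQID_BUS_SHIFT;
 *	dev = (ep->req_id & MSIQ_REQID_DEVICE_MASK) >> MSIQ_REQID_DEVICE_SHIFT;
 *	fn  = (ep->req_id & MSIQ_REQID_FUNC_MASK) >> MSIQ_REQID_FUNC_SHIFT;
 */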

static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long *head)
{
	unsigned long err, limit;

	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -ENXIO;

	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	if (unlikely(*head >= limit))
		return -EFBIG;

	return 0;
}

static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
				 unsigned long msiqid, unsigned long *head,
				 unsigned long *msi)
{
	struct pci_sun4v_msiq_entry *ep;
	unsigned long err, type;

	/* Note: void pointer arithmetic, 'head' is a byte offset  */
	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				 (pbm->msiq_ent_count *
				  sizeof(struct pci_sun4v_msiq_entry))) +
	      *head);

	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
		return 0;

	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = ep->msi_data;

	err = pci_sun4v_msi_setstate(pbm->devhandle,
				     ep->msi_data /* msi_num */,
				     HV_MSISTATE_IDLE);
	if (unlikely(err))
		return -ENXIO;

	/* Clear the entry.  */
	ep->version_type &= ~MSIQ_TYPE_MASK;

	(*head) += sizeof(struct pci_sun4v_msiq_entry);
	if (*head >=
	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
		*head = 0;

	return 1;
}

static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long head)
{
	unsigned long err;

	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -EINVAL;

	return 0;
}

static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			       unsigned long msi, int is_msi64)
{
	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
				  (is_msi64 ?
				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
		return -ENXIO;
	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
		return -ENXIO;
	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
		return -ENXIO;
	return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	unsigned long err, msiqid;

	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
	if (err)
		return -ENXIO;

	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

	return 0;
}

static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}
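
/* Size illustration: each pci_sun4v_msiq_entry is eight u64s (64 bytes),
 * so a queue of, say, 1024 entries occupies 64K, and all of a PBM's
 * queues come from the single physically contiguous allocation above.
 */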

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = pbm->msiq_first + i;

		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
	}

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);

	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}

static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
				    unsigned long msiqid,
				    unsigned long devino)
{
	unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino);

	if (!virt_irq)
		return -ENOMEM;

	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		return -EINVAL;
	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		return -EINVAL;

	return virt_irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
	.get_head	= pci_sun4v_get_head,
	.dequeue_msi	= pci_sun4v_dequeue_msi,
	.set_head	= pci_sun4v_set_head,
	.msi_setup	= pci_sun4v_msi_setup,
	.msi_teardown	= pci_sun4v_msi_teardown,
	.msiq_alloc	= pci_sun4v_msiq_alloc,
	.msiq_free	= pci_sun4v_msiq_free,
	.msiq_build_irq	= pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

static void __init pci_sun4v_pbm_init(struct pci_controller_info *p,
				      struct device_node *dp, u32 devhandle)
{
	struct pci_pbm_info *pbm;

	/* Bit 0x40 of the devhandle selects which of the controller's
	 * pair of PBMs (A or B) we are initializing.
	 */
	if (devhandle & 0x40)
		pbm = &p->pbm_B;
	else
		pbm = &p->pbm_A;

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	pbm->scan_bus = pci_sun4v_scan_bus;
	pbm->pci_ops = &sun4v_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->parent = p;
	pbm->prom_node = dp;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);
	pci_sun4v_iommu_init(pbm);
	pci_sun4v_msi_init(pbm);
}

void __init sun4v_pci_init(struct device_node *dp, char *model_name)
{
	static int hvapi_negotiated = 0;
	struct pci_controller_info *p;
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	struct property *prop;
	struct linux_prom64_registers *regs;
	u32 devhandle;
	int i;

	if (!hvapi_negotiated++) {
		int err = sun4v_hvapi_register(HV_GRP_PCI,
					       vpci_major,
					       &vpci_minor);

		if (err) {
			prom_printf("SUN4V_PCI: Could not register hvapi, "
				    "err=%d\n", err);
			prom_halt();
		}
		printk("SUN4V_PCI: Registered hvapi major[%lu] minor[%lu]\n",
		       vpci_major, vpci_minor);

		dma_ops = &sun4v_dma_ops;
	}

	prop = of_find_property(dp, "reg", NULL);
	if (!prop) {
		prom_printf("SUN4V_PCI: Could not find config registers\n");
		prom_halt();
	}
	regs = prop->value;

	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	/* The two PBMs of a controller have devhandles differing only in
	 * bit 0x40, so an existing sibling identifies our parent.
	 */
	for (pbm = pci_pbm_root; pbm; pbm = pbm->next) {
		if (pbm->devhandle == (devhandle ^ 0x40)) {
			pci_sun4v_pbm_init(pbm->parent, dp, devhandle);
			return;
		}
	}

	for_each_possible_cpu(i) {
		unsigned long page = get_zeroed_page(GFP_ATOMIC);

		if (!page)
			goto fatal_memory_error;

		per_cpu(iommu_batch, i).pglist = (u64 *) page;
	}

	p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
	if (!p)
		goto fatal_memory_error;

	iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	p->pbm_A.iommu = iommu;

	iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	p->pbm_B.iommu = iommu;

	pci_sun4v_pbm_init(p, dp, devhandle);
	return;

fatal_memory_error:
	prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
	prom_halt();
}