/* pci_sun4v.c: SUN4V specific PCI controller support.
*
- * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
*/
#include <linux/kernel.h>
return iommu_batch_flush(p);
}
-static long arena_alloc(struct iommu_arena *arena, unsigned long npages)
-{
- unsigned long n, i, start, end, limit;
- int pass;
-
- limit = arena->limit;
- start = arena->hint;
- pass = 0;
-
-again:
- n = find_next_zero_bit(arena->map, limit, start);
- end = n + npages;
- if (unlikely(end >= limit)) {
- if (likely(pass < 1)) {
- limit = start;
- start = 0;
- pass++;
- goto again;
- } else {
- /* Scanned the whole thing, give up. */
- return -1;
- }
- }
-
- for (i = n; i < end; i++) {
- if (test_bit(i, arena->map)) {
- start = i + 1;
- goto again;
- }
- }
-
- for (i = n; i < end; i++)
- __set_bit(i, arena->map);
-
- arena->hint = end;
-
- return n;
-}
-
-static void arena_free(struct iommu_arena *arena, unsigned long base,
- unsigned long npages)
-{
- unsigned long i;
-
- for (i = base; i < (base + npages); i++)
- __clear_bit(i, arena->map);
-}
-
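/* The private arena allocator above is replaced by the shared sparc64
 * IOMMU range allocator. A hedged sketch of the prototypes assumed here
 * (from arch/sparc64/kernel/iommu.c in this series, not shown in this
 * hunk):
 *
 *   unsigned long iommu_range_alloc(struct device *dev,
 *                                   struct iommu *iommu,
 *                                   unsigned long npages,
 *                                   unsigned long *handle);
 *   void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr,
 *                         unsigned long npages);
 *
 * Unlike arena_free(), iommu_range_free() takes the DMA address rather
 * than an entry index, which is why the call sites below are reordered.
 */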
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addrp, gfp_t gfp)
{
iommu = dev->archdata.iommu;
spin_lock_irqsave(&iommu->lock, flags);
- entry = arena_alloc(&iommu->arena, npages);
+ entry = iommu_range_alloc(dev, iommu, npages, NULL);
spin_unlock_irqrestore(&iommu->lock, flags);
- if (unlikely(entry < 0L))
- goto arena_alloc_fail;
+ if (unlikely(entry == DMA_ERROR_CODE))
+ goto range_alloc_fail;
*dma_addrp = (iommu->page_table_map_base +
(entry << IO_PAGE_SHIFT));
iommu_map_fail:
/* Interrupts are disabled. */
spin_lock(&iommu->lock);
- arena_free(&iommu->arena, entry, npages);
+ iommu_range_free(iommu, *dma_addrp, npages);
spin_unlock_irqrestore(&iommu->lock, flags);
-arena_alloc_fail:
+range_alloc_fail:
free_pages(first_page, order);
return NULL;
}
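/* Failure is now signaled by DMA_ERROR_CODE rather than a negative entry
 * value. On sparc64 this is assumed to be the all-ones dma_addr_t:
 *
 *   #define DMA_ERROR_CODE (~(dma_addr_t)0x0)
 */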
spin_lock_irqsave(&iommu->lock, flags);
- arena_free(&iommu->arena, entry, npages);
+ iommu_range_free(iommu, dvma, npages);
do {
unsigned long num;
npages >>= IO_PAGE_SHIFT;
spin_lock_irqsave(&iommu->lock, flags);
- entry = arena_alloc(&iommu->arena, npages);
+ entry = iommu_range_alloc(dev, iommu, npages, NULL);
spin_unlock_irqrestore(&iommu->lock, flags);
- if (unlikely(entry < 0L))
+ if (unlikely(entry == DMA_ERROR_CODE))
goto bad;
bus_addr = (iommu->page_table_map_base +
iommu_map_fail:
/* Interrupts are disabled. */
spin_lock(&iommu->lock);
- arena_free(&iommu->arena, entry, npages);
+ iommu_range_free(iommu, bus_addr, npages);
spin_unlock_irqrestore(&iommu->lock, flags);
return DMA_ERROR_CODE;
spin_lock_irqsave(&iommu->lock, flags);
- entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
- arena_free(&iommu->arena, entry, npages);
+ iommu_range_free(iommu, bus_addr, npages);
+ entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
do {
unsigned long num;
spin_unlock_irqrestore(&iommu->lock, flags);
}
-#define SG_ENT_PHYS_ADDRESS(SG) (__pa(sg_virt((SG))))
-
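/* SG_ENT_PHYS_ADDRESS() is still used by the rewritten dma_4v_map_sg()
 * below; the macro is assumed to move to the shared iommu_common.h in
 * this series, keeping the same definition:
 *
 *   #define SG_ENT_PHYS_ADDRESS(SG) (__pa(sg_virt((SG))))
 */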
-static long fill_sg(long entry, struct device *dev,
- struct scatterlist *sg,
- int nused, int nelems, unsigned long prot)
-{
- struct scatterlist *dma_sg = sg;
- unsigned long flags;
- int i;
-
- local_irq_save(flags);
-
- iommu_batch_start(dev, prot, entry);
-
- for (i = 0; i < nused; i++) {
- unsigned long pteval = ~0UL;
- u32 dma_npages;
-
- dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
- dma_sg->dma_length +
- ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
- do {
- unsigned long offset;
- signed int len;
-
- /* If we are here, we know we have at least one
- * more page to map. So walk forward until we
- * hit a page crossing, and begin creating new
- * mappings from that spot.
- */
- for (;;) {
- unsigned long tmp;
-
- tmp = SG_ENT_PHYS_ADDRESS(sg);
- len = sg->length;
- if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
- pteval = tmp & IO_PAGE_MASK;
- offset = tmp & (IO_PAGE_SIZE - 1UL);
- break;
- }
- if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
- pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
- offset = 0UL;
- len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
- break;
- }
- sg = sg_next(sg);
- nelems--;
- }
-
- pteval = (pteval & IOPTE_PAGE);
- while (len > 0) {
- long err;
-
- err = iommu_batch_add(pteval);
- if (unlikely(err < 0L))
- goto iommu_map_failed;
-
- pteval += IO_PAGE_SIZE;
- len -= (IO_PAGE_SIZE - offset);
- offset = 0;
- dma_npages--;
- }
-
- pteval = (pteval & IOPTE_PAGE) + len;
- sg = sg_next(sg);
- nelems--;
-
- /* Skip over any tail mappings we've fully mapped,
- * adjusting pteval along the way. Stop when we
- * detect a page crossing event.
- */
- while (nelems &&
- (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
- (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
- ((pteval ^
- (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
- pteval += sg->length;
- sg = sg_next(sg);
- nelems--;
- }
- if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
- pteval = ~0UL;
- } while (dma_npages != 0);
- dma_sg = sg_next(dma_sg);
- }
-
- if (unlikely(iommu_batch_end() < 0L))
- goto iommu_map_failed;
-
- local_irq_restore(flags);
- return 0;
-
-iommu_map_failed:
- local_irq_restore(flags);
- return -1L;
-}
-
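/* The mapping loop that replaces fill_sg() reuses the hypervisor batching
 * helpers defined earlier in this file (not shown in this hunk), assumed
 * to look roughly like:
 *
 *   static inline void iommu_batch_start(struct device *dev,
 *                                        unsigned long prot,
 *                                        unsigned long entry);
 *   static inline long iommu_batch_add(u64 phys_page);
 *   static inline long iommu_batch_end(void);
 *
 * iommu_batch_add() queues one IOTSB entry and flushes the batch to the
 * hypervisor when it fills; iommu_batch_end() flushes whatever remains.
 */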
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
+ unsigned long flags, npages, i, prot;
+ u32 dma_base, orig_dma_base;
+ struct scatterlist *sg;
struct iommu *iommu;
- unsigned long flags, npages, prot;
- u32 dma_base;
- struct scatterlist *sgtmp;
long entry, err;
- int used;
/* Fast path single entry scatterlists. */
if (nelems == 1) {
if (unlikely(direction == DMA_NONE))
goto bad;
- /* Step 1: Prepare scatter list. */
- npages = prepare_sg(dev, sglist, nelems);
+ npages = calc_npages(sglist, nelems);
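/* calc_npages() replaces prepare_sg(); it is added elsewhere in this file
 * (not shown in this hunk). A minimal sketch, assuming it simply sums the
 * IOMMU-page span of every entry:
 *
 *   static unsigned long calc_npages(struct scatterlist *sglist, int nelems)
 *   {
 *           unsigned long i, npages = 0;
 *           struct scatterlist *sg;
 *
 *           for_each_sg(sglist, sg, nelems, i)
 *                   npages += iommu_num_pages(SG_ENT_PHYS_ADDRESS(sg),
 *                                             sg->length);
 *
 *           return npages;
 *   }
 */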
- /* Step 2: Allocate a cluster and context, if necessary. */
spin_lock_irqsave(&iommu->lock, flags);
- entry = arena_alloc(&iommu->arena, npages);
+ entry = iommu_range_alloc(dev, iommu, npages, NULL);
spin_unlock_irqrestore(&iommu->lock, flags);
- if (unlikely(entry < 0L))
+ if (unlikely(entry == DMA_ERROR_CODE))
goto bad;
- dma_base = iommu->page_table_map_base +
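+	/* Remember the start of the allocated range; the error path must
+	 * hand iommu_range_free() the DMA address, not the entry index.
+	 */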
+ orig_dma_base = dma_base = iommu->page_table_map_base +
(entry << IO_PAGE_SHIFT);
- /* Step 3: Normalize DMA addresses. */
- used = nelems;
-
- sgtmp = sglist;
- while (used && sgtmp->dma_length) {
- sgtmp->dma_address += dma_base;
- sgtmp = sg_next(sgtmp);
- used--;
- }
- used = nelems - used;
-
- /* Step 4: Create the mappings. */
prot = HV_PCI_MAP_ATTR_READ;
if (direction != DMA_TO_DEVICE)
prot |= HV_PCI_MAP_ATTR_WRITE;
- err = fill_sg(entry, dev, sglist, used, nelems, prot);
+ local_irq_save(flags);
+
+ iommu_batch_start(dev, prot, entry);
+
+ for_each_sg(sglist, sg, nelems, i) {
+ unsigned long paddr = SG_ENT_PHYS_ADDRESS(sg);
+ unsigned long slen = sg->length;
+ unsigned long this_npages;
+
+ this_npages = iommu_num_pages(paddr, slen);
+
+ sg->dma_address = dma_base | (paddr & ~IO_PAGE_MASK);
+ sg->dma_length = slen;
+
+ paddr &= IO_PAGE_MASK;
+ while (this_npages--) {
+ err = iommu_batch_add(paddr);
+ if (unlikely(err < 0L)) {
+ local_irq_restore(flags);
+ goto iommu_map_failed;
+ }
+
+ paddr += IO_PAGE_SIZE;
+ dma_base += IO_PAGE_SIZE;
+ }
+ }
+
+ err = iommu_batch_end();
+
+ local_irq_restore(flags);
+
if (unlikely(err < 0L))
goto iommu_map_failed;
- return used;
+ return nelems;
bad:
if (printk_ratelimit())
iommu_map_failed:
spin_lock_irqsave(&iommu->lock, flags);
- arena_free(&iommu->arena, entry, npages);
+ iommu_range_free(iommu, orig_dma_base, npages);
spin_unlock_irqrestore(&iommu->lock, flags);
return 0;
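/* iommu_num_pages(), used in the mapping loop above, is assumed to be the
 * shared helper from iommu_common.h, which rounds a (vaddr, slen) span out
 * to whole IOMMU pages so unaligned head and tail bytes are covered:
 *
 *   static inline unsigned long iommu_num_pages(unsigned long vaddr,
 *                                               unsigned long slen)
 *   {
 *           unsigned long npages;
 *
 *           npages = IO_PAGE_ALIGN(vaddr + slen) - (vaddr & IO_PAGE_MASK);
 *           npages >>= IO_PAGE_SHIFT;
 *
 *           return npages;
 *   }
 */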
static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
+ unsigned long flags, npages;
struct pci_pbm_info *pbm;
+ u32 devhandle, bus_addr;
struct iommu *iommu;
- unsigned long flags, i, npages;
- struct scatterlist *sg, *sgprv;
long entry;
- u32 devhandle, bus_addr;
if (unlikely(direction == DMA_NONE)) {
if (printk_ratelimit())
devhandle = pbm->devhandle;
bus_addr = sglist->dma_address & IO_PAGE_MASK;
- sgprv = NULL;
- for_each_sg(sglist, sg, nelems, i) {
- if (sg->dma_length == 0)
- break;
-
- sgprv = sg;
- }
- npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
- bus_addr) >> IO_PAGE_SHIFT;
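+	/* The list was mapped as one contiguous IOTSB range, so its length
+	 * can be recomputed directly from the scatterlist.
+	 */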
+ npages = calc_npages(sglist, nelems);
entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
spin_lock_irqsave(&iommu->lock, flags);
- arena_free(&iommu->arena, entry, npages);
+ iommu_range_free(iommu, bus_addr, npages);
do {
unsigned long num;