X-Git-Url: http://pileus.org/git/?a=blobdiff_plain;f=drivers%2Fiommu%2Farm-smmu.c;h=8911850c94445fa5adf3b41de168293e5adf7721;hb=cf2d45b19ddb361241f36d8c9f3d0b234a18459b;hp=1abfb5684ab7ebcb7c735e10c71625439490ca09;hpb=7cdcec991c06cd6d792b304851cc245cfec507a7;p=~andy%2Flinux diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 1abfb5684ab..8911850c944 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -24,7 +24,7 @@ * - v7/v8 long-descriptor format * - Non-secure access to the SMMU * - 4k and 64k pages, with contiguous pte hints. - * - Up to 39-bit addressing + * - Up to 42-bit addressing (dependent on VA_BITS) * - Context fault reporting */ @@ -61,12 +61,13 @@ #define ARM_SMMU_GR1(smmu) ((smmu)->base + (smmu)->pagesize) /* Page table bits */ -#define ARM_SMMU_PTE_PAGE (((pteval_t)3) << 0) +#define ARM_SMMU_PTE_XN (((pteval_t)3) << 53) #define ARM_SMMU_PTE_CONT (((pteval_t)1) << 52) #define ARM_SMMU_PTE_AF (((pteval_t)1) << 10) #define ARM_SMMU_PTE_SH_NS (((pteval_t)0) << 8) #define ARM_SMMU_PTE_SH_OS (((pteval_t)2) << 8) #define ARM_SMMU_PTE_SH_IS (((pteval_t)3) << 8) +#define ARM_SMMU_PTE_PAGE (((pteval_t)3) << 0) #if PAGE_SIZE == SZ_4K #define ARM_SMMU_PTE_CONT_ENTRIES 16 @@ -392,7 +393,7 @@ struct arm_smmu_domain { struct arm_smmu_cfg root_cfg; phys_addr_t output_mask; - spinlock_t lock; + struct mutex lock; }; static DEFINE_SPINLOCK(arm_smmu_devices_lock); @@ -900,7 +901,7 @@ static int arm_smmu_domain_init(struct iommu_domain *domain) goto out_free_domain; smmu_domain->root_cfg.pgd = pgd; - spin_lock_init(&smmu_domain->lock); + mutex_init(&smmu_domain->lock); domain->priv = smmu_domain; return 0; @@ -1137,7 +1138,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) * Sanity check the domain. We don't currently support domains * that cross between different SMMU chains. 
*/ - spin_lock(&smmu_domain->lock); + mutex_lock(&smmu_domain->lock); if (!smmu_domain->leaf_smmu) { /* Now that we have a master, we can finalise the domain */ ret = arm_smmu_init_domain_context(domain, dev); @@ -1152,7 +1153,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) dev_name(device_smmu->dev)); goto err_unlock; } - spin_unlock(&smmu_domain->lock); + mutex_unlock(&smmu_domain->lock); /* Looks ok, so add the device to the domain */ master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node); @@ -1162,7 +1163,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) return arm_smmu_domain_add_master(smmu_domain, master); err_unlock: - spin_unlock(&smmu_domain->lock); + mutex_unlock(&smmu_domain->lock); return ret; } @@ -1205,7 +1206,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd, unsigned long pfn, int flags, int stage) { pte_t *pte, *start; - pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF; + pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN; if (pmd_none(*pmd)) { /* Allocate a new set of tables */ @@ -1244,7 +1245,9 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd, } /* If no access, create a faulting entry to avoid TLB fills */ - if (!(flags & (IOMMU_READ | IOMMU_WRITE))) + if (flags & IOMMU_EXEC) + pteval &= ~ARM_SMMU_PTE_XN; + else if (!(flags & (IOMMU_READ | IOMMU_WRITE))) pteval &= ~ARM_SMMU_PTE_PAGE; pteval |= ARM_SMMU_PTE_SH_IS; @@ -1394,7 +1397,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain, if (paddr & ~output_mask) return -ERANGE; - spin_lock(&smmu_domain->lock); + mutex_lock(&smmu_domain->lock); pgd += pgd_index(iova); end = iova + size; do { @@ -1410,7 +1413,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain, } while (pgd++, iova != end); out_unlock: - spin_unlock(&smmu_domain->lock); + mutex_unlock(&smmu_domain->lock); /* Ensure new page tables are visible to the hardware walker */ if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) @@ -1423,9 +1426,8 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int flags) { struct arm_smmu_domain *smmu_domain = domain->priv; - struct arm_smmu_device *smmu = smmu_domain->leaf_smmu; - if (!smmu_domain || !smmu) + if (!smmu_domain) return -ENODEV; /* Check for silent address truncation up the SMMU chain. 
*/ @@ -1449,44 +1451,34 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) { - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - pte_t *pte; + pgd_t *pgdp, pgd; + pud_t pud; + pmd_t pmd; + pte_t pte; struct arm_smmu_domain *smmu_domain = domain->priv; struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; - struct arm_smmu_device *smmu = root_cfg->smmu; - spin_lock(&smmu_domain->lock); - pgd = root_cfg->pgd; - if (!pgd) - goto err_unlock; + pgdp = root_cfg->pgd; + if (!pgdp) + return 0; - pgd += pgd_index(iova); - if (pgd_none_or_clear_bad(pgd)) - goto err_unlock; + pgd = *(pgdp + pgd_index(iova)); + if (pgd_none(pgd)) + return 0; - pud = pud_offset(pgd, iova); - if (pud_none_or_clear_bad(pud)) - goto err_unlock; + pud = *pud_offset(&pgd, iova); + if (pud_none(pud)) + return 0; - pmd = pmd_offset(pud, iova); - if (pmd_none_or_clear_bad(pmd)) - goto err_unlock; + pmd = *pmd_offset(&pud, iova); + if (pmd_none(pmd)) + return 0; - pte = pmd_page_vaddr(*pmd) + pte_index(iova); + pte = *(pmd_page_vaddr(pmd) + pte_index(iova)); if (pte_none(pte)) - goto err_unlock; - - spin_unlock(&smmu_domain->lock); - return __pfn_to_phys(pte_pfn(*pte)) | (iova & ~PAGE_MASK); + return 0; -err_unlock: - spin_unlock(&smmu_domain->lock); - dev_warn(smmu->dev, - "invalid (corrupt?) page tables detected for iova 0x%llx\n", - (unsigned long long)iova); - return -EINVAL; + return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK); } static int arm_smmu_domain_has_cap(struct iommu_domain *domain, @@ -1505,6 +1497,13 @@ static int arm_smmu_add_device(struct device *dev) { struct arm_smmu_device *child, *parent, *smmu; struct arm_smmu_master *master = NULL; + struct iommu_group *group; + int ret; + + if (dev->archdata.iommu) { + dev_warn(dev, "IOMMU driver already assigned to device\n"); + return -EINVAL; + } spin_lock(&arm_smmu_devices_lock); list_for_each_entry(parent, &arm_smmu_devices, list) { @@ -1537,13 +1536,23 @@ static int arm_smmu_add_device(struct device *dev) if (!master) return -ENODEV; + group = iommu_group_alloc(); + if (IS_ERR(group)) { + dev_err(dev, "Failed to allocate IOMMU group\n"); + return PTR_ERR(group); + } + + ret = iommu_group_add_device(group, dev); + iommu_group_put(group); dev->archdata.iommu = smmu; - return 0; + + return ret; } static void arm_smmu_remove_device(struct device *dev) { dev->archdata.iommu = NULL; + iommu_group_remove_device(dev); } static struct iommu_ops arm_smmu_ops = { @@ -1741,7 +1750,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) * allocation (PTRS_PER_PGD). */ #ifdef CONFIG_64BIT - /* Current maximum output size of 39 bits */ smmu->s1_output_size = min(39UL, size); #else smmu->s1_output_size = min(32UL, size); @@ -1756,7 +1764,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) } else { #ifdef CONFIG_64BIT size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK; - size = min(39, arm_smmu_id_size_to_bits(size)); + size = min(VA_BITS, arm_smmu_id_size_to_bits(size)); #else size = 32; #endif @@ -1863,6 +1871,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) dev_err(dev, "found only %d context interrupt(s) but %d required\n", smmu->num_context_irqs, smmu->num_context_banks); + err = -ENODEV; goto out_put_parent; }
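
The IOMMU_EXEC handling added to arm_smmu_alloc_init_pte() above boils down to a default-deny policy on execution: every new pte starts out execute-never (ARM_SMMU_PTE_XN sets both the PXN and UXN bits, 53 and 54), IOMMU_EXEC clears them again, and a mapping with neither read nor write permission drops the valid bits so the walker takes a translation fault instead of filling the TLB. A minimal user-space sketch of that bit arithmetic follows; the ARM_SMMU_PTE_* constants are copied from the driver, while the IOMMU_* flag values are assumed to mirror include/linux/iommu.h of this era.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pteval_t;

/* Page table bits, as defined in drivers/iommu/arm-smmu.c */
#define ARM_SMMU_PTE_XN		(((pteval_t)3) << 53)
#define ARM_SMMU_PTE_AF		(((pteval_t)1) << 10)
#define ARM_SMMU_PTE_SH_IS	(((pteval_t)3) << 8)
#define ARM_SMMU_PTE_PAGE	(((pteval_t)3) << 0)

/* Mapping flags, assumed to mirror include/linux/iommu.h */
#define IOMMU_READ	(1 << 0)
#define IOMMU_WRITE	(1 << 1)
#define IOMMU_EXEC	(1 << 3)

static pteval_t smmu_pteval(int flags)
{
	/* Default attributes: valid page, access flag set, execute-never. */
	pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN;

	if (flags & IOMMU_EXEC)
		pteval &= ~ARM_SMMU_PTE_XN;	/* executable mapping requested */
	else if (!(flags & (IOMMU_READ | IOMMU_WRITE)))
		pteval &= ~ARM_SMMU_PTE_PAGE;	/* no access: leave a faulting entry */

	return pteval | ARM_SMMU_PTE_SH_IS;	/* inner-shareable, as in the driver */
}

int main(void)
{
	printf("rw:   0x%016llx\n", (unsigned long long)smmu_pteval(IOMMU_READ | IOMMU_WRITE));
	printf("rwx:  0x%016llx\n", (unsigned long long)smmu_pteval(IOMMU_READ | IOMMU_WRITE | IOMMU_EXEC));
	printf("none: 0x%016llx\n", (unsigned long long)smmu_pteval(0));
	return 0;
}

Note that the no-access case clears ARM_SMMU_PTE_PAGE rather than writing a zero pte: the entry stays populated but invalid, which is exactly the "faulting entry to avoid TLB fills" the driver comment describes.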
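
The bump from 39-bit to 42-bit addressing in the file header comment is likewise visible in the probe changes near the end of the diff: the upstream bus size advertised in ID2.UBS is decoded to a bit width and then clamped to the kernel's virtual address space rather than to a hard-coded 39 bits. On arm64 of this vintage, VA_BITS is 39 with 4K pages and 42 with 64K pages, which is where the new 42-bit ceiling comes from. A sketch of the clamp, with the UBS decode table copied from the driver's arm_smmu_id_size_to_bits() and VA_BITS assumed to be the 64K-page value:

#include <stdio.h>

#define VA_BITS 42UL	/* assumed: arm64 with 64K pages; 39 with 4K pages */

/* ID2.UBS field -> upstream bus size in bits, as in the driver */
static unsigned long arm_smmu_id_size_to_bits(unsigned long size)
{
	switch (size) {
	case 0:  return 32;
	case 1:  return 36;
	case 2:  return 40;
	case 3:  return 42;
	case 4:  return 44;
	default: return 48;
	}
}

int main(void)
{
	unsigned long ubs;

	for (ubs = 0; ubs <= 5; ubs++) {
		unsigned long bus = arm_smmu_id_size_to_bits(ubs);
		/* Input size is the smaller of what the SMMU and the CPU support. */
		unsigned long ias = bus < VA_BITS ? bus : VA_BITS;
		printf("UBS=%lu: %lu-bit bus -> %lu-bit input address size\n",
		       ubs, bus, ias);
	}
	return 0;
}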