Merge branches 'iommu/fixes', 'x86/amd', 'groups', 'arm/tegra' and 'api/domain-attr...
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index bf2fbaad5e2295a2417c26ef1224ea8a51e8238e..d4b018e51592f0789dc781b3b3c0c675af3ff92f 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1907,6 +1907,15 @@ static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
        iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
 }
 
+static inline void unlink_domain_info(struct device_domain_info *info)
+{
+       assert_spin_locked(&device_domain_lock);
+       list_del(&info->link);
+       list_del(&info->global);
+       if (info->dev)
+               info->dev->dev.archdata.iommu = NULL;
+}
+
 static void domain_remove_dev_info(struct dmar_domain *domain)
 {
        struct device_domain_info *info;
@@ -1917,10 +1926,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
        while (!list_empty(&domain->devices)) {
                info = list_entry(domain->devices.next,
                        struct device_domain_info, link);
-               list_del(&info->link);
-               list_del(&info->global);
-               if (info->dev)
-                       info->dev->dev.archdata.iommu = NULL;
+               unlink_domain_info(info);
                spin_unlock_irqrestore(&device_domain_lock, flags);
 
                iommu_disable_dev_iotlb(info);
@@ -2287,12 +2293,6 @@ static int domain_add_dev_info(struct dmar_domain *domain,
        if (!info)
                return -ENOMEM;
 
-       ret = domain_context_mapping(domain, pdev, translation);
-       if (ret) {
-               free_devinfo_mem(info);
-               return ret;
-       }
-
        info->segment = pci_domain_nr(pdev->bus);
        info->bus = pdev->bus->number;
        info->devfn = pdev->devfn;
@@ -2305,6 +2305,15 @@ static int domain_add_dev_info(struct dmar_domain *domain,
        pdev->dev.archdata.iommu = info;
        spin_unlock_irqrestore(&device_domain_lock, flags);
 
+       ret = domain_context_mapping(domain, pdev, translation);
+       if (ret) {
+               spin_lock_irqsave(&device_domain_lock, flags);
+               unlink_domain_info(info);
+               spin_unlock_irqrestore(&device_domain_lock, flags);
+               free_devinfo_mem(info);
+               return ret;
+       }
+
        return 0;
 }
 
@@ -3728,10 +3737,7 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
                if (info->segment == pci_domain_nr(pdev->bus) &&
                    info->bus == pdev->bus->number &&
                    info->devfn == pdev->devfn) {
-                       list_del(&info->link);
-                       list_del(&info->global);
-                       if (info->dev)
-                               info->dev->dev.archdata.iommu = NULL;
+                       unlink_domain_info(info);
                        spin_unlock_irqrestore(&device_domain_lock, flags);
 
                        iommu_disable_dev_iotlb(info);
@@ -3786,11 +3792,7 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
        while (!list_empty(&domain->devices)) {
                info = list_entry(domain->devices.next,
                        struct device_domain_info, link);
-               list_del(&info->link);
-               list_del(&info->global);
-               if (info->dev)
-                       info->dev->dev.archdata.iommu = NULL;
-
+               unlink_domain_info(info);
                spin_unlock_irqrestore(&device_domain_lock, flags1);
 
                iommu_disable_dev_iotlb(info);
@@ -3930,6 +3932,10 @@ static int intel_iommu_domain_init(struct iommu_domain *domain)
        domain_update_iommu_cap(dmar_domain);
        domain->priv = dmar_domain;
 
+       domain->geometry.aperture_start = 0;
+       domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
+       domain->geometry.force_aperture = true;
+
        return 0;
 }
 
@@ -4088,52 +4094,70 @@ static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
        return 0;
 }
 
-/*
- * Group numbers are arbitrary.  Device with the same group number
- * indicate the iommu cannot differentiate between them.  To avoid
- * tracking used groups we just use the seg|bus|devfn of the lowest
- * level we're able to differentiate devices
- */
-static int intel_iommu_device_group(struct device *dev, unsigned int *groupid)
+static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
 {
-       struct pci_dev *pdev = to_pci_dev(dev);
-       struct pci_dev *bridge;
-       union {
-               struct {
-                       u8 devfn;
-                       u8 bus;
-                       u16 segment;
-               } pci;
-               u32 group;
-       } id;
+       pci_dev_put(*from);
+       *from = to;
+}
 
-       if (iommu_no_mapping(dev))
-               return -ENODEV;
+#define REQ_ACS_FLAGS  (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
 
-       id.pci.segment = pci_domain_nr(pdev->bus);
-       id.pci.bus = pdev->bus->number;
-       id.pci.devfn = pdev->devfn;
+static int intel_iommu_add_device(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct pci_dev *bridge, *dma_pdev;
+       struct iommu_group *group;
+       int ret;
 
-       if (!device_to_iommu(id.pci.segment, id.pci.bus, id.pci.devfn))
+       if (!device_to_iommu(pci_domain_nr(pdev->bus),
+                            pdev->bus->number, pdev->devfn))
                return -ENODEV;
 
        bridge = pci_find_upstream_pcie_bridge(pdev);
        if (bridge) {
-               if (pci_is_pcie(bridge)) {
-                       id.pci.bus = bridge->subordinate->number;
-                       id.pci.devfn = 0;
-               } else {
-                       id.pci.bus = bridge->bus->number;
-                       id.pci.devfn = bridge->devfn;
-               }
+               if (pci_is_pcie(bridge))
+                       dma_pdev = pci_get_domain_bus_and_slot(
+                                               pci_domain_nr(pdev->bus),
+                                               bridge->subordinate->number, 0);
+               else
+                       dma_pdev = pci_dev_get(bridge);
+       } else
+               dma_pdev = pci_dev_get(pdev);
+
+       swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
+
+       if (dma_pdev->multifunction &&
+           !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
+               swap_pci_ref(&dma_pdev,
+                            pci_get_slot(dma_pdev->bus,
+                                         PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
+                                         0)));
+
+       while (!pci_is_root_bus(dma_pdev->bus)) {
+               if (pci_acs_path_enabled(dma_pdev->bus->self,
+                                        NULL, REQ_ACS_FLAGS))
+                       break;
+
+               swap_pci_ref(&dma_pdev, pci_dev_get(dma_pdev->bus->self));
        }
 
-       if (!pdev->is_virtfn && iommu_group_mf)
-               id.pci.devfn = PCI_DEVFN(PCI_SLOT(id.pci.devfn), 0);
+       group = iommu_group_get(&dma_pdev->dev);
+       pci_dev_put(dma_pdev);
+       if (!group) {
+               group = iommu_group_alloc();
+               if (IS_ERR(group))
+                       return PTR_ERR(group);
+       }
 
-       *groupid = id.group;
+       ret = iommu_group_add_device(group, dev);
 
-       return 0;
+       iommu_group_put(group);
+       return ret;
+}
+
+static void intel_iommu_remove_device(struct device *dev)
+{
+       iommu_group_remove_device(dev);
 }
 
 static struct iommu_ops intel_iommu_ops = {
@@ -4145,7 +4169,8 @@ static struct iommu_ops intel_iommu_ops = {
        .unmap          = intel_iommu_unmap,
        .iova_to_phys   = intel_iommu_iova_to_phys,
        .domain_has_cap = intel_iommu_domain_has_cap,
-       .device_group   = intel_iommu_device_group,
+       .add_device     = intel_iommu_add_device,
+       .remove_device  = intel_iommu_remove_device,
        .pgsize_bitmap  = INTEL_IOMMU_PGSIZES,
 };
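
Note on the new domain->geometry fields set in intel_iommu_domain_init(): callers of the generic IOMMU API can consult the reported aperture to reject IOVA ranges the hardware cannot translate. The sketch below is illustrative only and assumes the 3.6-era API; check_and_map() is a made-up helper, not part of this patch or of the IOMMU core.

#include <linux/iommu.h>

/* Hypothetical caller: refuse to map outside the domain's aperture. */
static int check_and_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot)
{
	struct iommu_domain_geometry *geo = &domain->geometry;

	/* Only enforce the range when the hardware actually restricts DMA */
	if (geo->force_aperture &&
	    (iova < geo->aperture_start ||
	     iova + size - 1 > geo->aperture_end))
		return -EINVAL;

	return iommu_map(domain, iova, paddr, size, prot);
}

With this patch the Intel driver reports an aperture of [0, __DOMAIN_MAX_ADDR(gaw)] and sets force_aperture, so a check like the one above would catch attempts to map beyond the domain's guest address width.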
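Similarly, once the new .add_device callback has placed a device into an IOMMU group, other kernel code can look that group up through the generic group API rather than the old device_group/groupid scheme. Again purely illustrative; report_group() is a made-up example, not kernel code.

#include <linux/device.h>
#include <linux/iommu.h>

/* Hypothetical consumer: print which IOMMU group a device ended up in. */
static void report_group(struct device *dev)
{
	struct iommu_group *group = iommu_group_get(dev);

	if (!group)
		return;		/* device is not behind an IOMMU */

	dev_info(dev, "member of iommu group %d\n", iommu_group_id(group));
	iommu_group_put(group);
}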