/*
 * If it's a multifunction device that does not support our
 * required ACS flags, add to the same group as the lowest numbered
 * function that also does not support the required ACS flags.
 */
if (dma_pdev->multifunction &&
---- !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
---- swap_pci_ref(&dma_pdev,
---- pci_get_slot(dma_pdev->bus,
---- PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
---- 0)));
++++ !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
++++ u8 i, slot = PCI_SLOT(dma_pdev->devfn);
++++
++++ for (i = 0; i < 8; i++) {
++++ struct pci_dev *tmp;
++++
++++ tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
++++ if (!tmp)
++++ continue;
++++
++++ if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
++++ swap_pci_ref(&dma_pdev, tmp);
++++ break;
++++ }
++++ pci_dev_put(tmp);
++++ }
++++ }
/*
* Devices on the root bus go through the iommu. If that's not us,
/* Large PTE found which maps this address */
unmap_size = PTE_PAGE_SIZE(*pte);
+++ +
+++ + /* Only unmap from the first pte in the page */
+++ + if ((unmap_size - 1) & bus_addr)
+++ + break;
count = PAGE_SIZE_PTE_COUNT(unmap_size);
for (i = 0; i < count; i++)
pte[i] = 0ULL;
unmapped += unmap_size;
}
--- - BUG_ON(!is_power_of_2(unmapped));
+++ + BUG_ON(unmapped && !is_power_of_2(unmapped));
return unmapped;
}
write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
}
+++ +#define DEFINE_FREE_PT_FN(LVL, FN) \
+++ +static void free_pt_##LVL (unsigned long __pt) \
+++ +{ \
+++ + unsigned long p; \
+++ + u64 *pt; \
+++ + int i; \
+++ + \
+++ + pt = (u64 *)__pt; \
+++ + \
+++ + for (i = 0; i < 512; ++i) { \
+++ + if (!IOMMU_PTE_PRESENT(pt[i])) \
+++ + continue; \
+++ + \
+++ + p = (unsigned long)IOMMU_PTE_PAGE(pt[i]); \
+++ + FN(p); \
+++ + } \
+++ + free_page((unsigned long)pt); \
+++ +}
+++ +
+++ +DEFINE_FREE_PT_FN(l2, free_page)
+++ +DEFINE_FREE_PT_FN(l3, free_pt_l2)
+++ +DEFINE_FREE_PT_FN(l4, free_pt_l3)
+++ +DEFINE_FREE_PT_FN(l5, free_pt_l4)
+++ +DEFINE_FREE_PT_FN(l6, free_pt_l5)
+++ +
static void free_pagetable(struct protection_domain *domain)
{
--- - int i, j;
--- - u64 *p1, *p2, *p3;
-
- p1 = domain->pt_root;
-
- if (!p1)
- return;
-
- for (i = 0; i < 512; ++i) {
- if (!IOMMU_PTE_PRESENT(p1[i]))
- continue;
-
- p2 = IOMMU_PTE_PAGE(p1[i]);
- for (j = 0; j < 512; ++j) {
- if (!IOMMU_PTE_PRESENT(p2[j]))
- continue;
- p3 = IOMMU_PTE_PAGE(p2[j]);
- free_page((unsigned long)p3);
- }
+++ + unsigned long root = (unsigned long)domain->pt_root;
-- - p1 = domain->pt_root;
-- -
-- - if (!p1)
-- - return;
-- -
-- - for (i = 0; i < 512; ++i) {
-- - if (!IOMMU_PTE_PRESENT(p1[i]))
-- - continue;
-- -
-- - p2 = IOMMU_PTE_PAGE(p1[i]);
-- - for (j = 0; j < 512; ++j) {
-- - if (!IOMMU_PTE_PRESENT(p2[j]))
-- - continue;
-- - p3 = IOMMU_PTE_PAGE(p2[j]);
-- - free_page((unsigned long)p3);
-- - }
-- -
--- - free_page((unsigned long)p2);
+++ + switch (domain->mode) {
+++ + case PAGE_MODE_NONE:
+++ + break;
+++ + case PAGE_MODE_1_LEVEL:
+++ + free_page(root);
+++ + break;
+++ + case PAGE_MODE_2_LEVEL:
+++ + free_pt_l2(root);
+++ + break;
+++ + case PAGE_MODE_3_LEVEL:
+++ + free_pt_l3(root);
+++ + break;
+++ + case PAGE_MODE_4_LEVEL:
+++ + free_pt_l4(root);
+++ + break;
+++ + case PAGE_MODE_5_LEVEL:
+++ + free_pt_l5(root);
+++ + break;
+++ + case PAGE_MODE_6_LEVEL:
+++ + free_pt_l6(root);
+++ + break;
+++ + default:
+++ + BUG();
}
--- -
--- - free_page((unsigned long)p1);
--- -
--- - domain->pt_root = NULL;
}
static void free_gcr3_tbl_level1(u64 *tbl)