2 * Copyright (c) 2006, Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
21 * Author: Fenghua Yu <fenghua.yu@intel.com>
24 #include <linux/init.h>
25 #include <linux/bitmap.h>
26 #include <linux/debugfs.h>
27 #include <linux/export.h>
28 #include <linux/slab.h>
29 #include <linux/irq.h>
30 #include <linux/interrupt.h>
31 #include <linux/spinlock.h>
32 #include <linux/pci.h>
33 #include <linux/dmar.h>
34 #include <linux/dma-mapping.h>
35 #include <linux/mempool.h>
36 #include <linux/timer.h>
37 #include <linux/iova.h>
38 #include <linux/iommu.h>
39 #include <linux/intel-iommu.h>
40 #include <linux/syscore_ops.h>
41 #include <linux/tboot.h>
42 #include <linux/dmi.h>
43 #include <linux/pci-ats.h>
44 #include <linux/memblock.h>
45 #include <asm/cacheflush.h>
46 #include <asm/iommu.h>
48 #define ROOT_SIZE VTD_PAGE_SIZE
49 #define CONTEXT_SIZE VTD_PAGE_SIZE
51 #define IS_BRIDGE_HOST_DEVICE(pdev) \
52 ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
53 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
54 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
55 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
57 #define IOAPIC_RANGE_START (0xfee00000)
58 #define IOAPIC_RANGE_END (0xfeefffff)
59 #define IOVA_START_ADDR (0x1000)
61 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
63 #define MAX_AGAW_WIDTH 64
65 #define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
66 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
68 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
69 to match. That way, we can use 'unsigned long' for PFNs with impunity. */
70 #define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
71 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
72 #define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
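/*
 * Worked example (illustrative only, assuming the usual 4KiB VT-d page,
 * i.e. VTD_PAGE_SHIFT == 12): with the default 48-bit guest address width,
 * __DOMAIN_MAX_PFN(48) == 2^36 - 1 and DOMAIN_MAX_ADDR(48) == 2^48 - 4096.
 * On 64-bit builds DOMAIN_MAX_PFN(48) is the same value; on 32-bit builds
 * the min_t() clamps it to ULONG_MAX so PFNs still fit in an unsigned long.
 */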
74 #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
75 #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
76 #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
78 /* page table handling */
79 #define LEVEL_STRIDE (9)
80 #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
82 static inline int agaw_to_level(int agaw)
87 static inline int agaw_to_width(int agaw)
89 return 30 + agaw * LEVEL_STRIDE;
92 static inline int width_to_agaw(int width)
94 return (width - 30) / LEVEL_STRIDE;
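/*
 * The agaw <-> width mapping implied by the two helpers above (with
 * LEVEL_STRIDE == 9, each agaw step adds one 9-bit page-table level on
 * top of the 30-bit base): agaw 0 -> 30 bits, agaw 1 -> 39 bits,
 * agaw 2 -> 48 bits (DEFAULT_DOMAIN_ADDRESS_WIDTH), agaw 3 -> 57 bits.
 */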
97 static inline unsigned int level_to_offset_bits(int level)
99 return (level - 1) * LEVEL_STRIDE;
102 static inline int pfn_level_offset(unsigned long pfn, int level)
104 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
107 static inline unsigned long level_mask(int level)
109 return -1UL << level_to_offset_bits(level);
112 static inline unsigned long level_size(int level)
114 return 1UL << level_to_offset_bits(level);
117 static inline unsigned long align_to_level(unsigned long pfn, int level)
119 return (pfn + level_size(level) - 1) & level_mask(level);
122 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
124 return 1 << ((lvl - 1) * LEVEL_STRIDE);
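/*
 * Illustrative values for the level helpers above (assuming 4KiB VT-d
 * pages): level 1 covers 1 pfn (4KiB), level 2 covers 512 pfns (2MiB),
 * and level 3 covers 512 * 512 pfns (1GiB).
 */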
127 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
128 are never going to work. */
129 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
131 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
134 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
136 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
138 static inline unsigned long page_to_dma_pfn(struct page *pg)
140 return mm_to_dma_pfn(page_to_pfn(pg));
142 static inline unsigned long virt_to_dma_pfn(void *p)
144 return page_to_dma_pfn(virt_to_page(p));
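/*
 * Example of the mm <-> dma pfn conversions above: when PAGE_SHIFT ==
 * VTD_PAGE_SHIFT (4KiB MM pages, the common x86 case) the shift is zero
 * and the two pfn spaces coincide; with 64KiB MM pages each mm_pfn would
 * correspond to 16 consecutive 4KiB dma pfns.
 */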
147 /* global iommu list, set NULL for ignored DMAR units */
148 static struct intel_iommu **g_iommus;
150 static void __init check_tylersburg_isoch(void);
151 static int rwbf_quirk;
154 * set to 1 to panic the kernel if VT-d cannot be enabled successfully
155 * (used when the kernel is launched with TXT)
157 static int force_on = 0;
162 * 12-63: Context Ptr (12 - (haw-1))
169 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
170 static inline bool root_present(struct root_entry *root)
172 return (root->val & 1);
174 static inline void set_root_present(struct root_entry *root)
178 static inline void set_root_value(struct root_entry *root, unsigned long value)
180 root->val |= value & VTD_PAGE_MASK;
183 static inline struct context_entry *
184 get_context_addr_from_root(struct root_entry *root)
186 return (struct context_entry *)
187 (root_present(root)?phys_to_virt(
188 root->val & VTD_PAGE_MASK) :
195 * 1: fault processing disable
196 * 2-3: translation type
197 * 12-63: address space root
203 struct context_entry {
208 static inline bool context_present(struct context_entry *context)
210 return (context->lo & 1);
212 static inline void context_set_present(struct context_entry *context)
217 static inline void context_set_fault_enable(struct context_entry *context)
219 context->lo &= (((u64)-1) << 2) | 1;
222 static inline void context_set_translation_type(struct context_entry *context,
225 context->lo &= (((u64)-1) << 4) | 3;
226 context->lo |= (value & 3) << 2;
229 static inline void context_set_address_root(struct context_entry *context,
232 context->lo |= value & VTD_PAGE_MASK;
235 static inline void context_set_address_width(struct context_entry *context,
238 context->hi |= value & 7;
241 static inline void context_set_domain_id(struct context_entry *context,
244 context->hi |= (value & ((1 << 16) - 1)) << 8;
247 static inline void context_clear_entry(struct context_entry *context)
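/*
 * Putting the setters above together (bit positions taken from the
 * helpers themselves): the low word holds present (bit 0), fault
 * processing disable (bit 1), translation type (bits 2-3) and the
 * address space root (bits 12-63); the high word holds the address
 * width (bits 0-2) and the domain id (bits 8-23). For example,
 * context_set_domain_id(context, 5) ORs (5 & 0xffff) << 8 into
 * context->hi.
 */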
260 * 12-63: Host physical address
266 static inline void dma_clear_pte(struct dma_pte *pte)
271 static inline void dma_set_pte_readable(struct dma_pte *pte)
273 pte->val |= DMA_PTE_READ;
276 static inline void dma_set_pte_writable(struct dma_pte *pte)
278 pte->val |= DMA_PTE_WRITE;
281 static inline void dma_set_pte_snp(struct dma_pte *pte)
283 pte->val |= DMA_PTE_SNP;
286 static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
288 pte->val = (pte->val & ~3) | (prot & 3);
291 static inline u64 dma_pte_addr(struct dma_pte *pte)
294 return pte->val & VTD_PAGE_MASK;
296 /* Must have a full atomic 64-bit read */
297 return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
301 static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
303 pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
306 static inline bool dma_pte_present(struct dma_pte *pte)
308 return (pte->val & 3) != 0;
311 static inline bool dma_pte_superpage(struct dma_pte *pte)
313 return (pte->val & (1 << 7));
316 static inline int first_pte_in_page(struct dma_pte *pte)
318 return !((unsigned long)pte & ~VTD_PAGE_MASK);
322 * This domain is a static identity mapping domain.
323 * 1. This domain creates a static 1:1 mapping to all usable memory.
324 * 2. It maps to each iommu if successful.
325 * 3. Each iommu maps to this domain if successful.
327 static struct dmar_domain *si_domain;
328 static int hw_pass_through = 1;
330 /* devices under the same p2p bridge are owned by one domain */
331 #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
333 /* domain represents a virtual machine; more than one device
334 * across iommus may be owned by one domain, e.g. a kvm guest.
336 #define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1)
338 /* si_domain contains multiple devices */
339 #define DOMAIN_FLAG_STATIC_IDENTITY (1 << 2)
342 int id; /* domain id */
343 int nid; /* node id */
344 unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/
346 struct list_head devices; /* all devices' list */
347 struct iova_domain iovad; /* iova's that belong to this domain */
349 struct dma_pte *pgd; /* virtual address */
350 int gaw; /* max guest address width */
352 /* adjusted guest address width, 0 is level 2 30-bit */
355 int flags; /* flags to find out type of domain */
357 int iommu_coherency;/* indicate coherency of iommu access */
358 int iommu_snooping; /* indicate snooping control feature*/
359 int iommu_count; /* reference count of iommu */
360 int iommu_superpage;/* Level of superpages supported:
361 0 == 4KiB (no superpages), 1 == 2MiB,
362 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
363 spinlock_t iommu_lock; /* protect iommu set in domain */
364 u64 max_addr; /* maximum mapped address */
367 /* PCI domain-device relationship */
368 struct device_domain_info {
369 struct list_head link; /* link to domain siblings */
370 struct list_head global; /* link to global list */
371 int segment; /* PCI domain */
372 u8 bus; /* PCI bus number */
373 u8 devfn; /* PCI devfn number */
374 struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */
375 struct intel_iommu *iommu; /* IOMMU used by this device */
376 struct dmar_domain *domain; /* pointer to domain */
379 static void flush_unmaps_timeout(unsigned long data);
381 DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
383 #define HIGH_WATER_MARK 250
384 struct deferred_flush_tables {
386 struct iova *iova[HIGH_WATER_MARK];
387 struct dmar_domain *domain[HIGH_WATER_MARK];
390 static struct deferred_flush_tables *deferred_flush;
392 /* bitmap for indexing intel_iommus */
393 static int g_num_of_iommus;
395 static DEFINE_SPINLOCK(async_umap_flush_lock);
396 static LIST_HEAD(unmaps_to_do);
399 static long list_size;
401 static void domain_remove_dev_info(struct dmar_domain *domain);
403 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
404 int dmar_disabled = 0;
406 int dmar_disabled = 1;
407 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
409 int intel_iommu_enabled = 0;
410 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
412 static int dmar_map_gfx = 1;
413 static int dmar_forcedac;
414 static int intel_iommu_strict;
415 static int intel_iommu_superpage = 1;
417 int intel_iommu_gfx_mapped;
418 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
420 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
421 static DEFINE_SPINLOCK(device_domain_lock);
422 static LIST_HEAD(device_domain_list);
424 static struct iommu_ops intel_iommu_ops;
426 static int __init intel_iommu_setup(char *str)
431 if (!strncmp(str, "on", 2)) {
433 printk(KERN_INFO "Intel-IOMMU: enabled\n");
434 } else if (!strncmp(str, "off", 3)) {
436 printk(KERN_INFO "Intel-IOMMU: disabled\n");
437 } else if (!strncmp(str, "igfx_off", 8)) {
440 "Intel-IOMMU: disable GFX device mapping\n");
441 } else if (!strncmp(str, "forcedac", 8)) {
443 "Intel-IOMMU: Forcing DAC for PCI devices\n");
445 } else if (!strncmp(str, "strict", 6)) {
447 "Intel-IOMMU: disable batched IOTLB flush\n");
448 intel_iommu_strict = 1;
449 } else if (!strncmp(str, "sp_off", 6)) {
451 "Intel-IOMMU: disable supported super page\n");
452 intel_iommu_superpage = 0;
455 str += strcspn(str, ",");
461 __setup("intel_iommu=", intel_iommu_setup);
463 static struct kmem_cache *iommu_domain_cache;
464 static struct kmem_cache *iommu_devinfo_cache;
465 static struct kmem_cache *iommu_iova_cache;
467 static inline void *alloc_pgtable_page(int node)
472 page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
474 vaddr = page_address(page);
478 static inline void free_pgtable_page(void *vaddr)
480 free_page((unsigned long)vaddr);
483 static inline void *alloc_domain_mem(void)
485 return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
488 static void free_domain_mem(void *vaddr)
490 kmem_cache_free(iommu_domain_cache, vaddr);
493 static inline void * alloc_devinfo_mem(void)
495 return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
498 static inline void free_devinfo_mem(void *vaddr)
500 kmem_cache_free(iommu_devinfo_cache, vaddr);
503 struct iova *alloc_iova_mem(void)
505 return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
508 void free_iova_mem(struct iova *iova)
510 kmem_cache_free(iommu_iova_cache, iova);
514 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
519 sagaw = cap_sagaw(iommu->cap);
520 for (agaw = width_to_agaw(max_gaw);
522 if (test_bit(agaw, &sagaw))
530 * Calculate max SAGAW for each iommu.
532 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
534 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
538 * calculate agaw for each iommu.
539 * "SAGAW" may be different across iommus, use a default agaw, and
540 * get a supported less agaw for iommus that don't support the default agaw.
542 int iommu_calculate_agaw(struct intel_iommu *iommu)
544 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
547 /* This function only returns a single iommu for a domain */
548 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
552 /* si_domain and vm domain should not get here. */
553 BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
554 BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);
556 iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
557 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
560 return g_iommus[iommu_id];
563 static void domain_update_iommu_coherency(struct dmar_domain *domain)
567 domain->iommu_coherency = 1;
569 for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
570 if (!ecap_coherent(g_iommus[i]->ecap)) {
571 domain->iommu_coherency = 0;
577 static void domain_update_iommu_snooping(struct dmar_domain *domain)
581 domain->iommu_snooping = 1;
583 for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
584 if (!ecap_sc_support(g_iommus[i]->ecap)) {
585 domain->iommu_snooping = 0;
591 static void domain_update_iommu_superpage(struct dmar_domain *domain)
593 struct dmar_drhd_unit *drhd;
594 struct intel_iommu *iommu = NULL;
597 if (!intel_iommu_superpage) {
598 domain->iommu_superpage = 0;
602 /* set iommu_superpage to the smallest common denominator */
603 for_each_active_iommu(iommu, drhd) {
604 mask &= cap_super_page_val(iommu->cap);
609 domain->iommu_superpage = fls(mask);
612 /* Some capabilities may be different across iommus */
613 static void domain_update_iommu_cap(struct dmar_domain *domain)
615 domain_update_iommu_coherency(domain);
616 domain_update_iommu_snooping(domain);
617 domain_update_iommu_superpage(domain);
620 static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
622 struct dmar_drhd_unit *drhd = NULL;
625 for_each_drhd_unit(drhd) {
628 if (segment != drhd->segment)
631 for (i = 0; i < drhd->devices_cnt; i++) {
632 if (drhd->devices[i] &&
633 drhd->devices[i]->bus->number == bus &&
634 drhd->devices[i]->devfn == devfn)
636 if (drhd->devices[i] &&
637 drhd->devices[i]->subordinate &&
638 drhd->devices[i]->subordinate->number <= bus &&
639 drhd->devices[i]->subordinate->subordinate >= bus)
643 if (drhd->include_all)
650 static void domain_flush_cache(struct dmar_domain *domain,
651 void *addr, int size)
653 if (!domain->iommu_coherency)
654 clflush_cache_range(addr, size);
657 /* Gets context entry for a given bus and devfn */
658 static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
661 struct root_entry *root;
662 struct context_entry *context;
663 unsigned long phy_addr;
666 spin_lock_irqsave(&iommu->lock, flags);
667 root = &iommu->root_entry[bus];
668 context = get_context_addr_from_root(root);
670 context = (struct context_entry *)
671 alloc_pgtable_page(iommu->node);
673 spin_unlock_irqrestore(&iommu->lock, flags);
676 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
677 phy_addr = virt_to_phys((void *)context);
678 set_root_value(root, phy_addr);
679 set_root_present(root);
680 __iommu_flush_cache(iommu, root, sizeof(*root));
682 spin_unlock_irqrestore(&iommu->lock, flags);
683 return &context[devfn];
686 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
688 struct root_entry *root;
689 struct context_entry *context;
693 spin_lock_irqsave(&iommu->lock, flags);
694 root = &iommu->root_entry[bus];
695 context = get_context_addr_from_root(root);
700 ret = context_present(&context[devfn]);
702 spin_unlock_irqrestore(&iommu->lock, flags);
706 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
708 struct root_entry *root;
709 struct context_entry *context;
712 spin_lock_irqsave(&iommu->lock, flags);
713 root = &iommu->root_entry[bus];
714 context = get_context_addr_from_root(root);
716 context_clear_entry(&context[devfn]);
717 __iommu_flush_cache(iommu, &context[devfn], \
720 spin_unlock_irqrestore(&iommu->lock, flags);
723 static void free_context_table(struct intel_iommu *iommu)
725 struct root_entry *root;
728 struct context_entry *context;
730 spin_lock_irqsave(&iommu->lock, flags);
731 if (!iommu->root_entry) {
734 for (i = 0; i < ROOT_ENTRY_NR; i++) {
735 root = &iommu->root_entry[i];
736 context = get_context_addr_from_root(root);
738 free_pgtable_page(context);
740 free_pgtable_page(iommu->root_entry);
741 iommu->root_entry = NULL;
743 spin_unlock_irqrestore(&iommu->lock, flags);
746 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
747 unsigned long pfn, int target_level)
749 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
750 struct dma_pte *parent, *pte = NULL;
751 int level = agaw_to_level(domain->agaw);
754 BUG_ON(!domain->pgd);
755 BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
756 parent = domain->pgd;
761 offset = pfn_level_offset(pfn, level);
762 pte = &parent[offset];
763 if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
765 if (level == target_level)
768 if (!dma_pte_present(pte)) {
771 tmp_page = alloc_pgtable_page(domain->nid);
776 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
777 pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
778 if (cmpxchg64(&pte->val, 0ULL, pteval)) {
779 /* Someone else set it while we were thinking; use theirs. */
780 free_pgtable_page(tmp_page);
783 domain_flush_cache(domain, pte, sizeof(*pte));
786 parent = phys_to_virt(dma_pte_addr(pte));
794 /* return the pte of an address at a specific level */
795 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
797 int level, int *large_page)
799 struct dma_pte *parent, *pte = NULL;
800 int total = agaw_to_level(domain->agaw);
803 parent = domain->pgd;
804 while (level <= total) {
805 offset = pfn_level_offset(pfn, total);
806 pte = &parent[offset];
810 if (!dma_pte_present(pte)) {
815 if (pte->val & DMA_PTE_LARGE_PAGE) {
820 parent = phys_to_virt(dma_pte_addr(pte));
826 /* clear the last level pte; a tlb flush should follow */
827 static int dma_pte_clear_range(struct dmar_domain *domain,
828 unsigned long start_pfn,
829 unsigned long last_pfn)
831 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
832 unsigned int large_page = 1;
833 struct dma_pte *first_pte, *pte;
836 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
837 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
838 BUG_ON(start_pfn > last_pfn);
840 /* we don't need lock here; nobody else touches the iova range */
843 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
845 start_pfn = align_to_level(start_pfn + 1, large_page + 1);
850 start_pfn += lvl_to_nr_pages(large_page);
852 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
854 domain_flush_cache(domain, first_pte,
855 (void *)pte - (void *)first_pte);
857 } while (start_pfn && start_pfn <= last_pfn);
859 order = (large_page - 1) * 9;
863 /* free page table pages. last level pte should already be cleared */
864 static void dma_pte_free_pagetable(struct dmar_domain *domain,
865 unsigned long start_pfn,
866 unsigned long last_pfn)
868 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
869 struct dma_pte *first_pte, *pte;
870 int total = agaw_to_level(domain->agaw);
875 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
876 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
877 BUG_ON(start_pfn > last_pfn);
879 /* We don't need lock here; nobody else touches the iova range */
881 while (level <= total) {
882 tmp = align_to_level(start_pfn, level);
884 /* If we can't even clear one PTE at this level, we're done */
885 if (tmp + level_size(level) - 1 > last_pfn)
890 first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
891 if (large_page > level)
892 level = large_page + 1;
894 tmp = align_to_level(tmp + 1, level + 1);
898 if (dma_pte_present(pte)) {
899 free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
903 tmp += level_size(level);
904 } while (!first_pte_in_page(pte) &&
905 tmp + level_size(level) - 1 <= last_pfn);
907 domain_flush_cache(domain, first_pte,
908 (void *)pte - (void *)first_pte);
910 } while (tmp && tmp + level_size(level) - 1 <= last_pfn);
914 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
915 free_pgtable_page(domain->pgd);
921 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
923 struct root_entry *root;
926 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
930 __iommu_flush_cache(iommu, root, ROOT_SIZE);
932 spin_lock_irqsave(&iommu->lock, flags);
933 iommu->root_entry = root;
934 spin_unlock_irqrestore(&iommu->lock, flags);
939 static void iommu_set_root_entry(struct intel_iommu *iommu)
945 addr = iommu->root_entry;
947 raw_spin_lock_irqsave(&iommu->register_lock, flag);
948 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
950 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
952 /* Make sure hardware completes it */
953 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
954 readl, (sts & DMA_GSTS_RTPS), sts);
956 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
959 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
964 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
967 raw_spin_lock_irqsave(&iommu->register_lock, flag);
968 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
970 /* Make sure hardware completes it */
971 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
972 readl, (!(val & DMA_GSTS_WBFS)), val);
974 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
977 /* return value determines whether we need a write buffer flush */
978 static void __iommu_flush_context(struct intel_iommu *iommu,
979 u16 did, u16 source_id, u8 function_mask,
986 case DMA_CCMD_GLOBAL_INVL:
987 val = DMA_CCMD_GLOBAL_INVL;
989 case DMA_CCMD_DOMAIN_INVL:
990 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
992 case DMA_CCMD_DEVICE_INVL:
993 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
994 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1001 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1002 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1004 /* Make sure hardware completes it */
1005 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1006 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1008 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1011 /* return value determines whether we need a write buffer flush */
1012 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1013 u64 addr, unsigned int size_order, u64 type)
1015 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1016 u64 val = 0, val_iva = 0;
1020 case DMA_TLB_GLOBAL_FLUSH:
1021 /* a global flush doesn't need to set IVA_REG */
1022 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1024 case DMA_TLB_DSI_FLUSH:
1025 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1027 case DMA_TLB_PSI_FLUSH:
1028 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1029 /* Note: always flush non-leaf currently */
1030 val_iva = size_order | addr;
1035 /* Note: set drain read/write */
1038 * This is probably only there to be extra safe; it looks like we can
1039 * ignore it without any impact.
1041 if (cap_read_drain(iommu->cap))
1042 val |= DMA_TLB_READ_DRAIN;
1044 if (cap_write_drain(iommu->cap))
1045 val |= DMA_TLB_WRITE_DRAIN;
1047 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1048 /* Note: Only uses first TLB reg currently */
1050 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1051 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1053 /* Make sure hardware completes it */
1054 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1055 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1057 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1059 /* check IOTLB invalidation granularity */
1060 if (DMA_TLB_IAIG(val) == 0)
1061 printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
1062 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1063 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
1064 (unsigned long long)DMA_TLB_IIRG(type),
1065 (unsigned long long)DMA_TLB_IAIG(val));
1068 static struct device_domain_info *iommu_support_dev_iotlb(
1069 struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
1072 unsigned long flags;
1073 struct device_domain_info *info;
1074 struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);
1076 if (!ecap_dev_iotlb_support(iommu->ecap))
1082 spin_lock_irqsave(&device_domain_lock, flags);
1083 list_for_each_entry(info, &domain->devices, link)
1084 if (info->bus == bus && info->devfn == devfn) {
1088 spin_unlock_irqrestore(&device_domain_lock, flags);
1090 if (!found || !info->dev)
1093 if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
1096 if (!dmar_find_matched_atsr_unit(info->dev))
1099 info->iommu = iommu;
1104 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1109 pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
1112 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1114 if (!info->dev || !pci_ats_enabled(info->dev))
1117 pci_disable_ats(info->dev);
1120 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1121 u64 addr, unsigned mask)
1124 unsigned long flags;
1125 struct device_domain_info *info;
1127 spin_lock_irqsave(&device_domain_lock, flags);
1128 list_for_each_entry(info, &domain->devices, link) {
1129 if (!info->dev || !pci_ats_enabled(info->dev))
1132 sid = info->bus << 8 | info->devfn;
1133 qdep = pci_ats_queue_depth(info->dev);
1134 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1136 spin_unlock_irqrestore(&device_domain_lock, flags);
1139 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
1140 unsigned long pfn, unsigned int pages, int map)
1142 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1143 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
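/*
 * Example (illustrative): for pages == 5, __roundup_pow_of_two(5) == 8,
 * so mask == 3 and the request is widened to an 8-page (32KiB with 4KiB
 * pages) naturally aligned region before being handed to PSI.
 */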
1148 * Fall back to domain-selective flush if there is no PSI support or the size is
1150 * too big. PSI requires the page size to be 2 ^ x, and the base address to be
1151 * naturally aligned to the size.
1153 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1154 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1157 iommu->flush.flush_iotlb(iommu, did, addr, mask,
1161 * In caching mode, changes of pages from non-present to present require
1162 * a flush. However, the device IOTLB doesn't need to be flushed in this case.
1164 if (!cap_caching_mode(iommu->cap) || !map)
1165 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
1168 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1171 unsigned long flags;
1173 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1174 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1175 pmen &= ~DMA_PMEN_EPM;
1176 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1178 /* wait for the protected region status bit to clear */
1179 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1180 readl, !(pmen & DMA_PMEN_PRS), pmen);
1182 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1185 static int iommu_enable_translation(struct intel_iommu *iommu)
1188 unsigned long flags;
1190 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1191 iommu->gcmd |= DMA_GCMD_TE;
1192 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1194 /* Make sure hardware completes it */
1195 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1196 readl, (sts & DMA_GSTS_TES), sts);
1198 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1202 static int iommu_disable_translation(struct intel_iommu *iommu)
1207 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1208 iommu->gcmd &= ~DMA_GCMD_TE;
1209 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1211 /* Make sure hardware completes it */
1212 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1213 readl, (!(sts & DMA_GSTS_TES)), sts);
1215 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1220 static int iommu_init_domains(struct intel_iommu *iommu)
1222 unsigned long ndomains;
1223 unsigned long nlongs;
1225 ndomains = cap_ndoms(iommu->cap);
1226 pr_debug("IOMMU %d: Number of Domains supportd <%ld>\n", iommu->seq_id,
1228 nlongs = BITS_TO_LONGS(ndomains);
1230 spin_lock_init(&iommu->lock);
1232 /* TBD: there might be 64K domains,
1233 * consider other allocation schemes for future chips
1235 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1236 if (!iommu->domain_ids) {
1237 printk(KERN_ERR "Allocating domain id array failed\n");
1240 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1242 if (!iommu->domains) {
1243 printk(KERN_ERR "Allocating domain array failed\n");
1248 * if Caching mode is set, then invalid translations are tagged
1249 * with domain id 0. Hence we need to pre-allocate it.
1251 if (cap_caching_mode(iommu->cap))
1252 set_bit(0, iommu->domain_ids);
1257 static void domain_exit(struct dmar_domain *domain);
1258 static void vm_domain_exit(struct dmar_domain *domain);
1260 void free_dmar_iommu(struct intel_iommu *iommu)
1262 struct dmar_domain *domain;
1264 unsigned long flags;
1266 if ((iommu->domains) && (iommu->domain_ids)) {
1267 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
1268 domain = iommu->domains[i];
1269 clear_bit(i, iommu->domain_ids);
1271 spin_lock_irqsave(&domain->iommu_lock, flags);
1272 if (--domain->iommu_count == 0) {
1273 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
1274 vm_domain_exit(domain);
1276 domain_exit(domain);
1278 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1282 if (iommu->gcmd & DMA_GCMD_TE)
1283 iommu_disable_translation(iommu);
1286 irq_set_handler_data(iommu->irq, NULL);
1287 /* This will mask the irq */
1288 free_irq(iommu->irq, iommu);
1289 destroy_irq(iommu->irq);
1292 kfree(iommu->domains);
1293 kfree(iommu->domain_ids);
1295 g_iommus[iommu->seq_id] = NULL;
1297 /* if all iommus are freed, free g_iommus */
1298 for (i = 0; i < g_num_of_iommus; i++) {
1303 if (i == g_num_of_iommus)
1306 /* free context mapping */
1307 free_context_table(iommu);
1310 static struct dmar_domain *alloc_domain(void)
1312 struct dmar_domain *domain;
1314 domain = alloc_domain_mem();
1319 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
1325 static int iommu_attach_domain(struct dmar_domain *domain,
1326 struct intel_iommu *iommu)
1329 unsigned long ndomains;
1330 unsigned long flags;
1332 ndomains = cap_ndoms(iommu->cap);
1334 spin_lock_irqsave(&iommu->lock, flags);
1336 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1337 if (num >= ndomains) {
1338 spin_unlock_irqrestore(&iommu->lock, flags);
1339 printk(KERN_ERR "IOMMU: no free domain ids\n");
1344 set_bit(num, iommu->domain_ids);
1345 set_bit(iommu->seq_id, &domain->iommu_bmp);
1346 iommu->domains[num] = domain;
1347 spin_unlock_irqrestore(&iommu->lock, flags);
1352 static void iommu_detach_domain(struct dmar_domain *domain,
1353 struct intel_iommu *iommu)
1355 unsigned long flags;
1359 spin_lock_irqsave(&iommu->lock, flags);
1360 ndomains = cap_ndoms(iommu->cap);
1361 for_each_set_bit(num, iommu->domain_ids, ndomains) {
1362 if (iommu->domains[num] == domain) {
1369 clear_bit(num, iommu->domain_ids);
1370 clear_bit(iommu->seq_id, &domain->iommu_bmp);
1371 iommu->domains[num] = NULL;
1373 spin_unlock_irqrestore(&iommu->lock, flags);
1376 static struct iova_domain reserved_iova_list;
1377 static struct lock_class_key reserved_rbtree_key;
1379 static int dmar_init_reserved_ranges(void)
1381 struct pci_dev *pdev = NULL;
1385 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
1387 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1388 &reserved_rbtree_key);
1390 /* IOAPIC ranges shouldn't be accessed by DMA */
1391 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1392 IOVA_PFN(IOAPIC_RANGE_END));
1394 printk(KERN_ERR "Reserve IOAPIC range failed\n");
1398 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1399 for_each_pci_dev(pdev) {
1402 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1403 r = &pdev->resource[i];
1404 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1406 iova = reserve_iova(&reserved_iova_list,
1410 printk(KERN_ERR "Reserve iova failed\n");
1418 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1420 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1423 static inline int guestwidth_to_adjustwidth(int gaw)
1426 int r = (gaw - 12) % 9;
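/*
 * Worked example (the rounding branch itself is not shown here): gaw 48
 * gives r == 0, i.e. 48 is exactly 12 page-offset bits plus four 9-bit
 * table levels; a gaw of 40 gives r == 1 and would presumably be rounded
 * up to the next such width, 48.
 */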
1437 static int domain_init(struct dmar_domain *domain, int guest_width)
1439 struct intel_iommu *iommu;
1440 int adjust_width, agaw;
1441 unsigned long sagaw;
1443 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
1444 spin_lock_init(&domain->iommu_lock);
1446 domain_reserve_special_ranges(domain);
1448 /* calculate AGAW */
1449 iommu = domain_get_iommu(domain);
1450 if (guest_width > cap_mgaw(iommu->cap))
1451 guest_width = cap_mgaw(iommu->cap);
1452 domain->gaw = guest_width;
1453 adjust_width = guestwidth_to_adjustwidth(guest_width);
1454 agaw = width_to_agaw(adjust_width);
1455 sagaw = cap_sagaw(iommu->cap);
1456 if (!test_bit(agaw, &sagaw)) {
1457 /* hardware doesn't support it, choose a bigger one */
1458 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1459 agaw = find_next_bit(&sagaw, 5, agaw);
1463 domain->agaw = agaw;
1464 INIT_LIST_HEAD(&domain->devices);
1466 if (ecap_coherent(iommu->ecap))
1467 domain->iommu_coherency = 1;
1469 domain->iommu_coherency = 0;
1471 if (ecap_sc_support(iommu->ecap))
1472 domain->iommu_snooping = 1;
1474 domain->iommu_snooping = 0;
1476 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1477 domain->iommu_count = 1;
1478 domain->nid = iommu->node;
1480 /* always allocate the top pgd */
1481 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1484 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1488 static void domain_exit(struct dmar_domain *domain)
1490 struct dmar_drhd_unit *drhd;
1491 struct intel_iommu *iommu;
1493 /* Domain 0 is reserved, so don't process it */
1497 /* Flush any lazy unmaps that may reference this domain */
1498 if (!intel_iommu_strict)
1499 flush_unmaps_timeout(0);
1501 domain_remove_dev_info(domain);
1503 put_iova_domain(&domain->iovad);
1506 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1508 /* free page tables */
1509 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1511 for_each_active_iommu(iommu, drhd)
1512 if (test_bit(iommu->seq_id, &domain->iommu_bmp))
1513 iommu_detach_domain(domain, iommu);
1515 free_domain_mem(domain);
1518 static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1519 u8 bus, u8 devfn, int translation)
1521 struct context_entry *context;
1522 unsigned long flags;
1523 struct intel_iommu *iommu;
1524 struct dma_pte *pgd;
1526 unsigned long ndomains;
1529 struct device_domain_info *info = NULL;
1531 pr_debug("Set context mapping for %02x:%02x.%d\n",
1532 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1534 BUG_ON(!domain->pgd);
1535 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1536 translation != CONTEXT_TT_MULTI_LEVEL);
1538 iommu = device_to_iommu(segment, bus, devfn);
1542 context = device_to_context_entry(iommu, bus, devfn);
1545 spin_lock_irqsave(&iommu->lock, flags);
1546 if (context_present(context)) {
1547 spin_unlock_irqrestore(&iommu->lock, flags);
1554 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1555 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
1558 /* find an available domain id for this device in iommu */
1559 ndomains = cap_ndoms(iommu->cap);
1560 for_each_set_bit(num, iommu->domain_ids, ndomains) {
1561 if (iommu->domains[num] == domain) {
1569 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1570 if (num >= ndomains) {
1571 spin_unlock_irqrestore(&iommu->lock, flags);
1572 printk(KERN_ERR "IOMMU: no free domain ids\n");
1576 set_bit(num, iommu->domain_ids);
1577 iommu->domains[num] = domain;
1581 /* Skip top levels of page tables for
1582 * an iommu which has less agaw than the default.
1583 * Unnecessary for PT mode.
1585 if (translation != CONTEXT_TT_PASS_THROUGH) {
1586 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1587 pgd = phys_to_virt(dma_pte_addr(pgd));
1588 if (!dma_pte_present(pgd)) {
1589 spin_unlock_irqrestore(&iommu->lock, flags);
1596 context_set_domain_id(context, id);
1598 if (translation != CONTEXT_TT_PASS_THROUGH) {
1599 info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
1600 translation = info ? CONTEXT_TT_DEV_IOTLB :
1601 CONTEXT_TT_MULTI_LEVEL;
1604 * In pass through mode, AW must be programmed to indicate the largest
1605 * AGAW value supported by hardware. And ASR is ignored by hardware.
1607 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
1608 context_set_address_width(context, iommu->msagaw);
1610 context_set_address_root(context, virt_to_phys(pgd));
1611 context_set_address_width(context, iommu->agaw);
1614 context_set_translation_type(context, translation);
1615 context_set_fault_enable(context);
1616 context_set_present(context);
1617 domain_flush_cache(domain, context, sizeof(*context));
1620 * It's a non-present to present mapping. If the hardware doesn't cache
1621 * non-present entries, we only need to flush the write-buffer. If it
1622 * _does_ cache non-present entries, then it does so in the special
1623 * domain #0, which we have to flush:
1625 if (cap_caching_mode(iommu->cap)) {
1626 iommu->flush.flush_context(iommu, 0,
1627 (((u16)bus) << 8) | devfn,
1628 DMA_CCMD_MASK_NOBIT,
1629 DMA_CCMD_DEVICE_INVL);
1630 iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
1632 iommu_flush_write_buffer(iommu);
1634 iommu_enable_dev_iotlb(info);
1635 spin_unlock_irqrestore(&iommu->lock, flags);
1637 spin_lock_irqsave(&domain->iommu_lock, flags);
1638 if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
1639 domain->iommu_count++;
1640 if (domain->iommu_count == 1)
1641 domain->nid = iommu->node;
1642 domain_update_iommu_cap(domain);
1644 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1649 domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1653 struct pci_dev *tmp, *parent;
1655 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
1656 pdev->bus->number, pdev->devfn,
1661 /* dependent device mapping */
1662 tmp = pci_find_upstream_pcie_bridge(pdev);
1665 /* Secondary interface's bus number and devfn 0 */
1666 parent = pdev->bus->self;
1667 while (parent != tmp) {
1668 ret = domain_context_mapping_one(domain,
1669 pci_domain_nr(parent->bus),
1670 parent->bus->number,
1671 parent->devfn, translation);
1674 parent = parent->bus->self;
1676 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
1677 return domain_context_mapping_one(domain,
1678 pci_domain_nr(tmp->subordinate),
1679 tmp->subordinate->number, 0,
1681 else /* this is a legacy PCI bridge */
1682 return domain_context_mapping_one(domain,
1683 pci_domain_nr(tmp->bus),
1689 static int domain_context_mapped(struct pci_dev *pdev)
1692 struct pci_dev *tmp, *parent;
1693 struct intel_iommu *iommu;
1695 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
1700 ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
1703 /* dependent device mapping */
1704 tmp = pci_find_upstream_pcie_bridge(pdev);
1707 /* Secondary interface's bus number and devfn 0 */
1708 parent = pdev->bus->self;
1709 while (parent != tmp) {
1710 ret = device_context_mapped(iommu, parent->bus->number,
1714 parent = parent->bus->self;
1716 if (pci_is_pcie(tmp))
1717 return device_context_mapped(iommu, tmp->subordinate->number,
1720 return device_context_mapped(iommu, tmp->bus->number,
1724 /* Returns a number of VTD pages, but aligned to MM page size */
1725 static inline unsigned long aligned_nrpages(unsigned long host_addr,
1728 host_addr &= ~PAGE_MASK;
1729 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
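/*
 * Example (illustrative, assuming 4KiB MM and VT-d pages): a buffer at
 * page offset 0x800 with size 0x1000 ends at offset 0x1800, so
 * PAGE_ALIGN(0x1800) == 0x2000 and the helper returns 2 VT-d pages,
 * even though the size alone is only one page.
 */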
1732 /* Return largest possible superpage level for a given mapping */
1733 static inline int hardware_largepage_caps(struct dmar_domain *domain,
1734 unsigned long iov_pfn,
1735 unsigned long phy_pfn,
1736 unsigned long pages)
1738 int support, level = 1;
1739 unsigned long pfnmerge;
1741 support = domain->iommu_superpage;
1743 /* To use a large page, the virtual *and* physical addresses
1744 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1745 of them will mean we have to use smaller pages. So just
1746 merge them and check both at once. */
1747 pfnmerge = iov_pfn | phy_pfn;
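/*
 * Example (illustrative): if both iov_pfn and phy_pfn have their low
 * nine bits clear (i.e. both addresses are 2MiB aligned) and the
 * mapping covers at least 512 pfns, the loop below can return level 2,
 * allowing a 2MiB superpage -- provided domain->iommu_superpage says
 * the hardware supports it.
 */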
1749 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
1750 pages >>= VTD_STRIDE_SHIFT;
1753 pfnmerge >>= VTD_STRIDE_SHIFT;
1760 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1761 struct scatterlist *sg, unsigned long phys_pfn,
1762 unsigned long nr_pages, int prot)
1764 struct dma_pte *first_pte = NULL, *pte = NULL;
1765 phys_addr_t uninitialized_var(pteval);
1766 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
1767 unsigned long sg_res;
1768 unsigned int largepage_lvl = 0;
1769 unsigned long lvl_pages = 0;
1771 BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1773 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1776 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1781 sg_res = nr_pages + 1;
1782 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1785 while (nr_pages > 0) {
1789 sg_res = aligned_nrpages(sg->offset, sg->length);
1790 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1791 sg->dma_length = sg->length;
1792 pteval = page_to_phys(sg_page(sg)) | prot;
1793 phys_pfn = pteval >> VTD_PAGE_SHIFT;
1797 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
1799 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
1802 /* It is a large page */
1803 if (largepage_lvl > 1)
1804 pteval |= DMA_PTE_LARGE_PAGE;
1806 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
1809 /* We don't need lock here, nobody else
1810 * touches the iova range
1812 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
1814 static int dumps = 5;
1815 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
1816 iov_pfn, tmp, (unsigned long long)pteval);
1819 debug_dma_dump_mappings(NULL);
1824 lvl_pages = lvl_to_nr_pages(largepage_lvl);
1826 BUG_ON(nr_pages < lvl_pages);
1827 BUG_ON(sg_res < lvl_pages);
1829 nr_pages -= lvl_pages;
1830 iov_pfn += lvl_pages;
1831 phys_pfn += lvl_pages;
1832 pteval += lvl_pages * VTD_PAGE_SIZE;
1833 sg_res -= lvl_pages;
1835 /* If the next PTE would be the first in a new page, then we
1836 need to flush the cache on the entries we've just written.
1837 And then we'll need to recalculate 'pte', so clear it and
1838 let it get set again in the if (!pte) block above.
1840 If we're done (!nr_pages) we need to flush the cache too.
1842 Also if we've been setting superpages, we may need to
1843 recalculate 'pte' and switch back to smaller pages for the
1844 end of the mapping, if the trailing size is not enough to
1845 use another superpage (i.e. sg_res < lvl_pages). */
1847 if (!nr_pages || first_pte_in_page(pte) ||
1848 (largepage_lvl > 1 && sg_res < lvl_pages)) {
1849 domain_flush_cache(domain, first_pte,
1850 (void *)pte - (void *)first_pte);
1854 if (!sg_res && nr_pages)
1860 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1861 struct scatterlist *sg, unsigned long nr_pages,
1864 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
1867 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1868 unsigned long phys_pfn, unsigned long nr_pages,
1871 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
1874 static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
1879 clear_context_table(iommu, bus, devfn);
1880 iommu->flush.flush_context(iommu, 0, 0, 0,
1881 DMA_CCMD_GLOBAL_INVL);
1882 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
1885 static void domain_remove_dev_info(struct dmar_domain *domain)
1887 struct device_domain_info *info;
1888 unsigned long flags;
1889 struct intel_iommu *iommu;
1891 spin_lock_irqsave(&device_domain_lock, flags);
1892 while (!list_empty(&domain->devices)) {
1893 info = list_entry(domain->devices.next,
1894 struct device_domain_info, link);
1895 list_del(&info->link);
1896 list_del(&info->global);
1898 info->dev->dev.archdata.iommu = NULL;
1899 spin_unlock_irqrestore(&device_domain_lock, flags);
1901 iommu_disable_dev_iotlb(info);
1902 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
1903 iommu_detach_dev(iommu, info->bus, info->devfn);
1904 free_devinfo_mem(info);
1906 spin_lock_irqsave(&device_domain_lock, flags);
1908 spin_unlock_irqrestore(&device_domain_lock, flags);
1913 * Note: we use struct pci_dev->dev.archdata.iommu to store the info
1915 static struct dmar_domain *
1916 find_domain(struct pci_dev *pdev)
1918 struct device_domain_info *info;
1920 /* No lock here, assumes no domain exit in normal case */
1921 info = pdev->dev.archdata.iommu;
1923 return info->domain;
1927 /* domain is initialized */
1928 static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1930 struct dmar_domain *domain, *found = NULL;
1931 struct intel_iommu *iommu;
1932 struct dmar_drhd_unit *drhd;
1933 struct device_domain_info *info, *tmp;
1934 struct pci_dev *dev_tmp;
1935 unsigned long flags;
1936 int bus = 0, devfn = 0;
1940 domain = find_domain(pdev);
1944 segment = pci_domain_nr(pdev->bus);
1946 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1948 if (pci_is_pcie(dev_tmp)) {
1949 bus = dev_tmp->subordinate->number;
1952 bus = dev_tmp->bus->number;
1953 devfn = dev_tmp->devfn;
1955 spin_lock_irqsave(&device_domain_lock, flags);
1956 list_for_each_entry(info, &device_domain_list, global) {
1957 if (info->segment == segment &&
1958 info->bus == bus && info->devfn == devfn) {
1959 found = info->domain;
1963 spin_unlock_irqrestore(&device_domain_lock, flags);
1964 /* pcie-pci bridge already has a domain, use it */
1971 domain = alloc_domain();
1975 /* Allocate new domain for the device */
1976 drhd = dmar_find_matched_drhd_unit(pdev);
1978 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
1982 iommu = drhd->iommu;
1984 ret = iommu_attach_domain(domain, iommu);
1986 free_domain_mem(domain);
1990 if (domain_init(domain, gaw)) {
1991 domain_exit(domain);
1995 /* register pcie-to-pci device */
1997 info = alloc_devinfo_mem();
1999 domain_exit(domain);
2002 info->segment = segment;
2004 info->devfn = devfn;
2006 info->domain = domain;
2007 /* This domain is shared by devices under p2p bridge */
2008 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
2010 /* pcie-to-pci bridge already has a domain, use it */
2012 spin_lock_irqsave(&device_domain_lock, flags);
2013 list_for_each_entry(tmp, &device_domain_list, global) {
2014 if (tmp->segment == segment &&
2015 tmp->bus == bus && tmp->devfn == devfn) {
2016 found = tmp->domain;
2021 spin_unlock_irqrestore(&device_domain_lock, flags);
2022 free_devinfo_mem(info);
2023 domain_exit(domain);
2026 list_add(&info->link, &domain->devices);
2027 list_add(&info->global, &device_domain_list);
2028 spin_unlock_irqrestore(&device_domain_lock, flags);
2033 info = alloc_devinfo_mem();
2036 info->segment = segment;
2037 info->bus = pdev->bus->number;
2038 info->devfn = pdev->devfn;
2040 info->domain = domain;
2041 spin_lock_irqsave(&device_domain_lock, flags);
2042 /* somebody is fast */
2043 found = find_domain(pdev);
2044 if (found != NULL) {
2045 spin_unlock_irqrestore(&device_domain_lock, flags);
2046 if (found != domain) {
2047 domain_exit(domain);
2050 free_devinfo_mem(info);
2053 list_add(&info->link, &domain->devices);
2054 list_add(&info->global, &device_domain_list);
2055 pdev->dev.archdata.iommu = info;
2056 spin_unlock_irqrestore(&device_domain_lock, flags);
2059 /* recheck it here, maybe others set it */
2060 return find_domain(pdev);
2063 static int iommu_identity_mapping;
2064 #define IDENTMAP_ALL 1
2065 #define IDENTMAP_GFX 2
2066 #define IDENTMAP_AZALIA 4
2068 static int iommu_domain_identity_map(struct dmar_domain *domain,
2069 unsigned long long start,
2070 unsigned long long end)
2072 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2073 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
2075 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2076 dma_to_mm_pfn(last_vpfn))) {
2077 printk(KERN_ERR "IOMMU: reserve iova failed\n");
2081 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2082 start, end, domain->id);
2084 * The RMRR range might overlap the physical memory range,
2087 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2089 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2090 last_vpfn - first_vpfn + 1,
2091 DMA_PTE_READ|DMA_PTE_WRITE);
2094 static int iommu_prepare_identity_map(struct pci_dev *pdev,
2095 unsigned long long start,
2096 unsigned long long end)
2098 struct dmar_domain *domain;
2101 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2105 /* For _hardware_ passthrough, don't bother. But for software
2106 passthrough, we do it anyway -- it may indicate a memory
2107 range which is reserved in E820 and therefore didn't get set
2108 up to start with in the si_domain */
2109 if (domain == si_domain && hw_pass_through) {
2110 printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2111 pci_name(pdev), start, end);
2116 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2117 pci_name(pdev), start, end);
2120 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2121 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2122 dmi_get_system_info(DMI_BIOS_VENDOR),
2123 dmi_get_system_info(DMI_BIOS_VERSION),
2124 dmi_get_system_info(DMI_PRODUCT_VERSION));
2129 if (end >> agaw_to_width(domain->agaw)) {
2130 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2131 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2132 agaw_to_width(domain->agaw),
2133 dmi_get_system_info(DMI_BIOS_VENDOR),
2134 dmi_get_system_info(DMI_BIOS_VERSION),
2135 dmi_get_system_info(DMI_PRODUCT_VERSION));
2140 ret = iommu_domain_identity_map(domain, start, end);
2144 /* context entry init */
2145 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
2152 domain_exit(domain);
2156 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2157 struct pci_dev *pdev)
2159 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2161 return iommu_prepare_identity_map(pdev, rmrr->base_address,
2165 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
2166 static inline void iommu_prepare_isa(void)
2168 struct pci_dev *pdev;
2171 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2175 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
2176 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);
2179 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2180 "floppy might not work\n");
2184 static inline void iommu_prepare_isa(void)
2188 #endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
2190 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2192 static int __init si_domain_init(int hw)
2194 struct dmar_drhd_unit *drhd;
2195 struct intel_iommu *iommu;
2198 si_domain = alloc_domain();
2202 pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
2204 for_each_active_iommu(iommu, drhd) {
2205 ret = iommu_attach_domain(si_domain, iommu);
2207 domain_exit(si_domain);
2212 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2213 domain_exit(si_domain);
2217 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2222 for_each_online_node(nid) {
2223 unsigned long start_pfn, end_pfn;
2226 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2227 ret = iommu_domain_identity_map(si_domain,
2228 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2237 static void domain_remove_one_dev_info(struct dmar_domain *domain,
2238 struct pci_dev *pdev);
2239 static int identity_mapping(struct pci_dev *pdev)
2241 struct device_domain_info *info;
2243 if (likely(!iommu_identity_mapping))
2246 info = pdev->dev.archdata.iommu;
2247 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2248 return (info->domain == si_domain);
2253 static int domain_add_dev_info(struct dmar_domain *domain,
2254 struct pci_dev *pdev,
2257 struct device_domain_info *info;
2258 unsigned long flags;
2261 info = alloc_devinfo_mem();
2265 ret = domain_context_mapping(domain, pdev, translation);
2267 free_devinfo_mem(info);
2271 info->segment = pci_domain_nr(pdev->bus);
2272 info->bus = pdev->bus->number;
2273 info->devfn = pdev->devfn;
2275 info->domain = domain;
2277 spin_lock_irqsave(&device_domain_lock, flags);
2278 list_add(&info->link, &domain->devices);
2279 list_add(&info->global, &device_domain_list);
2280 pdev->dev.archdata.iommu = info;
2281 spin_unlock_irqrestore(&device_domain_lock, flags);
2286 static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2288 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2291 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2294 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2298 * We want to start off with all devices in the 1:1 domain, and
2299 * take them out later if we find they can't access all of memory.
2301 * However, we can't do this for PCI devices behind bridges,
2302 * because all PCI devices behind the same bridge will end up
2303 * with the same source-id on their transactions.
2305 * Practically speaking, we can't change things around for these
2306 * devices at run-time, because we can't be sure there'll be no
2307 * DMA transactions in flight for any of their siblings.
2309 * So PCI devices (unless they're on the root bus) as well as
2310 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2311 * the 1:1 domain, just in _case_ one of their siblings turns out
2312 * not to be able to map all of memory.
2314 if (!pci_is_pcie(pdev)) {
2315 if (!pci_is_root_bus(pdev->bus))
2317 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2319 } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
2323 * At boot time, we don't yet know if devices will be 64-bit capable.
2324 * Assume that they will -- if they turn out not to be, then we can
2325 * take them out of the 1:1 domain later.
2329 * If the device's dma_mask is less than the system's memory
2330 * size then this is not a candidate for identity mapping.
2332 u64 dma_mask = pdev->dma_mask;
2334 if (pdev->dev.coherent_dma_mask &&
2335 pdev->dev.coherent_dma_mask < dma_mask)
2336 dma_mask = pdev->dev.coherent_dma_mask;
2338 return dma_mask >= dma_get_required_mask(&pdev->dev);
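/*
 * Example (illustrative): a device whose effective DMA mask is 32 bits
 * on a machine with more than 4GiB of RAM fails the check above, so it
 * is not put into the 1:1 domain and gets a dynamic domain instead.
 */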
2344 static int __init iommu_prepare_static_identity_mapping(int hw)
2346 struct pci_dev *pdev = NULL;
2349 ret = si_domain_init(hw);
2353 for_each_pci_dev(pdev) {
2354 /* Skip Host/PCI Bridge devices */
2355 if (IS_BRIDGE_HOST_DEVICE(pdev))
2357 if (iommu_should_identity_map(pdev, 1)) {
2358 printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
2359 hw ? "hardware" : "software", pci_name(pdev));
2361 ret = domain_add_dev_info(si_domain, pdev,
2362 hw ? CONTEXT_TT_PASS_THROUGH :
2363 CONTEXT_TT_MULTI_LEVEL);
2372 static int __init init_dmars(void)
2374 struct dmar_drhd_unit *drhd;
2375 struct dmar_rmrr_unit *rmrr;
2376 struct pci_dev *pdev;
2377 struct intel_iommu *iommu;
2383 * initialize and program root entry to not present
2386 for_each_drhd_unit(drhd) {
2389 * lock not needed as this is only incremented in the single-threaded
2390 * kernel __init code path; all other accesses are reads
2395 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2398 printk(KERN_ERR "Allocating global iommu array failed\n");
2403 deferred_flush = kzalloc(g_num_of_iommus *
2404 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2405 if (!deferred_flush) {
2410 for_each_drhd_unit(drhd) {
2414 iommu = drhd->iommu;
2415 g_iommus[iommu->seq_id] = iommu;
2417 ret = iommu_init_domains(iommu);
2423 * we could share the same root & context tables
2424 * among all IOMMUs. Need to split it later.
2426 ret = iommu_alloc_root_entry(iommu);
2428 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2431 if (!ecap_pass_through(iommu->ecap))
2432 hw_pass_through = 0;
2436 * Start from a sane IOMMU hardware state.
2438 for_each_drhd_unit(drhd) {
2442 iommu = drhd->iommu;
2445 * If queued invalidation was already initialized by us
2446 * (for example, while enabling interrupt remapping), then
2447 * things are already rolling from a sane state.
2453 * Clear any previous faults.
2455 dmar_fault(-1, iommu);
2457 * Disable queued invalidation if supported and already enabled
2458 * before OS handover.
2460 dmar_disable_qi(iommu);
2463 for_each_drhd_unit(drhd) {
2467 iommu = drhd->iommu;
2469 if (dmar_enable_qi(iommu)) {
2471 * Queued invalidation is not enabled; use register-based invalidation instead.
2474 iommu->flush.flush_context = __iommu_flush_context;
2475 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2476 printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
2479 (unsigned long long)drhd->reg_base_addr);
2481 iommu->flush.flush_context = qi_flush_context;
2482 iommu->flush.flush_iotlb = qi_flush_iotlb;
2483 printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
2486 (unsigned long long)drhd->reg_base_addr);
2490 if (iommu_pass_through)
2491 iommu_identity_mapping |= IDENTMAP_ALL;
2493 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
2494 iommu_identity_mapping |= IDENTMAP_GFX;
2497 check_tylersburg_isoch();
2500 * If pass-through is not set or not enabled, set up context entries for
2501 * identity mappings for RMRR, GFX and ISA, and possibly fall back to static
2502 * identity mapping if iommu_identity_mapping is set.
2504 if (iommu_identity_mapping) {
2505 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2507 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
2513 * for each dev attached to rmrr
2515 * locate drhd for dev, alloc domain for dev
2516 * allocate free domain
2517 * allocate page table entries for rmrr
2518 * if context not allocated for bus
2519 * allocate and init context
2520 * set present in root table for this bus
2521 * init context with domain, translation etc
2525 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2526 for_each_rmrr_units(rmrr) {
2527 for (i = 0; i < rmrr->devices_cnt; i++) {
2528 pdev = rmrr->devices[i];
2530 * some BIOSes list non-existent devices in the DMAR table; just skip them
2535 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2538 "IOMMU: mapping reserved region failed\n");
2542 iommu_prepare_isa();
2547 * global invalidate context cache
2548 * global invalidate iotlb
2549 * enable translation
2551 for_each_drhd_unit(drhd) {
2552 if (drhd->ignored) {
2554 * we always have to disable PMRs or DMA may fail on this device
2558 iommu_disable_protect_mem_regions(drhd->iommu);
2561 iommu = drhd->iommu;
2563 iommu_flush_write_buffer(iommu);
2565 ret = dmar_set_interrupt(iommu);
2569 iommu_set_root_entry(iommu);
2571 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
2572 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2574 ret = iommu_enable_translation(iommu);
2578 iommu_disable_protect_mem_regions(iommu);
2583 for_each_drhd_unit(drhd) {
2586 iommu = drhd->iommu;
2593 /* This takes a number of _MM_ pages, not VTD pages */
2594 static struct iova *intel_alloc_iova(struct device *dev,
2595 struct dmar_domain *domain,
2596 unsigned long nrpages, uint64_t dma_mask)
2598 struct pci_dev *pdev = to_pci_dev(dev);
2599 struct iova *iova = NULL;
2601 /* Restrict dma_mask to the width that the iommu can handle */
2602 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2604 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
2606 * First try to allocate an io virtual address in
2607 * DMA_BIT_MASK(32) and if that fails then try allocating from the higher range
2610 iova = alloc_iova(&domain->iovad, nrpages,
2611 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2615 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2616 if (unlikely(!iova)) {
2617 printk(KERN_ERR "Allocating %ld-page iova for %s failed",
2618 nrpages, pci_name(pdev));
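/*
 * Illustrative sketch, not driver code: the allocation above prefers IOVA
 * space below 4GiB for devices with a wider-than-32-bit DMA mask (unless
 * "forcedac" was given on the command line), and only falls back to the
 * device's full mask when the low range is exhausted. The helper below is
 * hypothetical and only restates that policy in standalone form.
 */
static __maybe_unused struct iova *alloc_iova_prefer_32bit(struct iova_domain *iovad,
							    unsigned long nrpages,
							    u64 dma_mask, bool forcedac)
{
	struct iova *iova = NULL;

	if (!forcedac && dma_mask > DMA_BIT_MASK(32))
		iova = alloc_iova(iovad, nrpages, IOVA_PFN(DMA_BIT_MASK(32)), 1);
	if (!iova)
		iova = alloc_iova(iovad, nrpages, IOVA_PFN(dma_mask), 1);

	return iova;
}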
2625 static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
2627 struct dmar_domain *domain;
2630 domain = get_domain_for_dev(pdev,
2631 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2634 "Allocating domain for %s failed", pci_name(pdev));
2638 /* make sure context mapping is ok */
2639 if (unlikely(!domain_context_mapped(pdev))) {
2640 ret = domain_context_mapping(domain, pdev,
2641 CONTEXT_TT_MULTI_LEVEL);
2644 "Domain context map for %s failed",
2653 static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
2655 struct device_domain_info *info;
2657 /* No lock here, assumes no domain exit in normal case */
2658 info = dev->dev.archdata.iommu;
2660 return info->domain;
2662 return __get_valid_domain_for_dev(dev);
2665 static int iommu_dummy(struct pci_dev *pdev)
2667 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2670 /* Check if the pdev needs to go through the non-identity map and unmap process. */
2671 static int iommu_no_mapping(struct device *dev)
2673 struct pci_dev *pdev;
2676 if (unlikely(dev->bus != &pci_bus_type))
2679 pdev = to_pci_dev(dev);
2680 if (iommu_dummy(pdev))
2683 if (!iommu_identity_mapping)
2686 found = identity_mapping(pdev);
2688 if (iommu_should_identity_map(pdev, 0))
2692 * A 32-bit DMA device is removed from si_domain and falls back
2693 * to non-identity mapping.
2695 domain_remove_one_dev_info(si_domain, pdev);
2696 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2702 * In case a 64-bit DMA device is detached from a VM, the device
2703 * is put back into si_domain for identity mapping.
2705 if (iommu_should_identity_map(pdev, 0)) {
2707 ret = domain_add_dev_info(si_domain, pdev,
2709 CONTEXT_TT_PASS_THROUGH :
2710 CONTEXT_TT_MULTI_LEVEL);
2712 printk(KERN_INFO "64bit %s uses identity mapping\n",
2722 static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2723 size_t size, int dir, u64 dma_mask)
2725 struct pci_dev *pdev = to_pci_dev(hwdev);
2726 struct dmar_domain *domain;
2727 phys_addr_t start_paddr;
2731 struct intel_iommu *iommu;
2732 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
2734 BUG_ON(dir == DMA_NONE);
2736 if (iommu_no_mapping(hwdev))
2739 domain = get_valid_domain_for_dev(pdev);
2743 iommu = domain_get_iommu(domain);
2744 size = aligned_nrpages(paddr, size);
2746 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
2751 * Check if DMAR supports zero-length reads on write-only mappings
2754 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
2755 !cap_zlr(iommu->cap))
2756 prot |= DMA_PTE_READ;
2757 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2758 prot |= DMA_PTE_WRITE;
2760 * paddr ~ (paddr + size) might span a partial page, so we map the whole
2761 * page. Note: if two parts of one page are mapped separately, we
2762 * might have two guest_addr mappings to the same host paddr, but this
2763 * is not a big problem
2765 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
2766 mm_to_dma_pfn(paddr_pfn), size, prot);
2770 /* it's a non-present to present mapping. Only flush if caching mode */
2771 if (cap_caching_mode(iommu->cap))
2772 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
2774 iommu_flush_write_buffer(iommu);
2776 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2777 start_paddr += paddr & ~PAGE_MASK;
2782 __free_iova(&domain->iovad, iova);
2783 printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
2784 pci_name(pdev), size, (unsigned long long)paddr, dir);
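/*
 * Illustrative sketch (hypothetical helper, not in the driver): both the
 * single-page path above and the scatterlist path below derive the PTE
 * protection bits the same way -- device writes to memory always get
 * DMA_PTE_WRITE, and DMA_PTE_READ is set for DMA_TO_DEVICE/BIDIRECTIONAL
 * transfers or whenever the IOMMU lacks the ZLR capability (i.e. it cannot
 * do zero-length reads on write-only mappings).
 */
static __maybe_unused int dma_dir_to_prot(enum dma_data_direction dir, bool has_zlr)
{
	int prot = 0;

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || !has_zlr)
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;

	return prot;
}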
2788 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2789 unsigned long offset, size_t size,
2790 enum dma_data_direction dir,
2791 struct dma_attrs *attrs)
2793 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2794 dir, to_pci_dev(dev)->dma_mask);
2797 static void flush_unmaps(void)
2803 /* just flush them all */
2804 for (i = 0; i < g_num_of_iommus; i++) {
2805 struct intel_iommu *iommu = g_iommus[i];
2809 if (!deferred_flush[i].next)
2812 /* In caching mode, global flushes make emulation expensive */
2813 if (!cap_caching_mode(iommu->cap))
2814 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2815 DMA_TLB_GLOBAL_FLUSH);
2816 for (j = 0; j < deferred_flush[i].next; j++) {
2818 struct iova *iova = deferred_flush[i].iova[j];
2819 struct dmar_domain *domain = deferred_flush[i].domain[j];
2821 /* On real hardware multiple invalidations are expensive */
2822 if (cap_caching_mode(iommu->cap))
2823 iommu_flush_iotlb_psi(iommu, domain->id,
2824 iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
2826 mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
2827 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2828 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
2830 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
2832 deferred_flush[i].next = 0;
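/*
 * Worked example (illustration only, assuming PAGE_SIZE == VTD_PAGE_SIZE ==
 * 4KiB): the device-IOTLB flush above takes the invalidation size as a
 * power-of-two order. An IOVA covering 8 pages (pfn_hi - pfn_lo + 1 == 8)
 * yields mask = ilog2(8) = 3, i.e. a naturally aligned 32KiB invalidation
 * starting at pfn_lo. The helper name below is hypothetical.
 */
static __maybe_unused int example_dev_iotlb_mask(unsigned long pfn_lo,
						 unsigned long pfn_hi)
{
	return ilog2(mm_to_dma_pfn(pfn_hi - pfn_lo + 1));
}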
2838 static void flush_unmaps_timeout(unsigned long data)
2840 unsigned long flags;
2842 spin_lock_irqsave(&async_umap_flush_lock, flags);
2844 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2847 static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2849 unsigned long flags;
2851 struct intel_iommu *iommu;
2853 spin_lock_irqsave(&async_umap_flush_lock, flags);
2854 if (list_size == HIGH_WATER_MARK)
2857 iommu = domain_get_iommu(dom);
2858 iommu_id = iommu->seq_id;
2860 next = deferred_flush[iommu_id].next;
2861 deferred_flush[iommu_id].domain[next] = dom;
2862 deferred_flush[iommu_id].iova[next] = iova;
2863 deferred_flush[iommu_id].next++;
2866 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2870 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2873 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2874 size_t size, enum dma_data_direction dir,
2875 struct dma_attrs *attrs)
2877 struct pci_dev *pdev = to_pci_dev(dev);
2878 struct dmar_domain *domain;
2879 unsigned long start_pfn, last_pfn;
2881 struct intel_iommu *iommu;
2883 if (iommu_no_mapping(dev))
2886 domain = find_domain(pdev);
2889 iommu = domain_get_iommu(domain);
2891 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
2892 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
2893 (unsigned long long)dev_addr))
2896 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2897 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
2899 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2900 pci_name(pdev), start_pfn, last_pfn);
2902 /* clear the whole page */
2903 dma_pte_clear_range(domain, start_pfn, last_pfn);
2905 /* free page tables */
2906 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2908 if (intel_iommu_strict) {
2909 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
2910 last_pfn - start_pfn + 1, 0);
2912 __free_iova(&domain->iovad, iova);
2914 add_unmap(domain, iova);
2916 * queue up the release of the unmap to save the 1/6th of the
2917 * CPU time used up by the iotlb flush operation...
2922 static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2923 dma_addr_t *dma_handle, gfp_t flags)
2928 size = PAGE_ALIGN(size);
2929 order = get_order(size);
2931 if (!iommu_no_mapping(hwdev))
2932 flags &= ~(GFP_DMA | GFP_DMA32);
2933 else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
2934 if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
2940 vaddr = (void *)__get_free_pages(flags, order);
2943 memset(vaddr, 0, size);
2945 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2947 hwdev->coherent_dma_mask);
2950 free_pages((unsigned long)vaddr, order);
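/*
 * Illustrative sketch, not driver code: when the device is translated by
 * the IOMMU, the coherent allocation above does not need to come from a
 * low physical zone, so GFP_DMA/GFP_DMA32 are dropped; only an
 * untranslated device with a narrow coherent mask needs a zone-restricted
 * allocation. The helper below is hypothetical, and the zone-forcing
 * branches are an assumption based on the surrounding logic.
 */
static __maybe_unused gfp_t coherent_gfp_flags(gfp_t flags, bool translated,
					       u64 coherent_mask, u64 required_mask)
{
	if (translated)
		return flags & ~(GFP_DMA | GFP_DMA32);

	if (coherent_mask < required_mask) {
		if (coherent_mask < DMA_BIT_MASK(32))
			return flags | GFP_DMA;
		return flags | GFP_DMA32;
	}

	return flags;
}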
2954 static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2955 dma_addr_t dma_handle)
2959 size = PAGE_ALIGN(size);
2960 order = get_order(size);
2962 intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
2963 free_pages((unsigned long)vaddr, order);
2966 static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2967 int nelems, enum dma_data_direction dir,
2968 struct dma_attrs *attrs)
2970 struct pci_dev *pdev = to_pci_dev(hwdev);
2971 struct dmar_domain *domain;
2972 unsigned long start_pfn, last_pfn;
2974 struct intel_iommu *iommu;
2976 if (iommu_no_mapping(hwdev))
2979 domain = find_domain(pdev);
2982 iommu = domain_get_iommu(domain);
2984 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
2985 if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
2986 (unsigned long long)sglist[0].dma_address))
2989 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2990 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
2992 /* clear the whole page */
2993 dma_pte_clear_range(domain, start_pfn, last_pfn);
2995 /* free page tables */
2996 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2998 if (intel_iommu_strict) {
2999 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
3000 last_pfn - start_pfn + 1, 0);
3002 __free_iova(&domain->iovad, iova);
3004 add_unmap(domain, iova);
3006 * queue up the release of the unmap to save the 1/6th of the
3007 * CPU time used up by the iotlb flush operation...
3012 static int intel_nontranslate_map_sg(struct device *hddev,
3013 struct scatterlist *sglist, int nelems, int dir)
3016 struct scatterlist *sg;
3018 for_each_sg(sglist, sg, nelems, i) {
3019 BUG_ON(!sg_page(sg));
3020 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
3021 sg->dma_length = sg->length;
3026 static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
3027 enum dma_data_direction dir, struct dma_attrs *attrs)
3030 struct pci_dev *pdev = to_pci_dev(hwdev);
3031 struct dmar_domain *domain;
3034 struct iova *iova = NULL;
3036 struct scatterlist *sg;
3037 unsigned long start_vpfn;
3038 struct intel_iommu *iommu;
3040 BUG_ON(dir == DMA_NONE);
3041 if (iommu_no_mapping(hwdev))
3042 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
3044 domain = get_valid_domain_for_dev(pdev);
3048 iommu = domain_get_iommu(domain);
3050 for_each_sg(sglist, sg, nelems, i)
3051 size += aligned_nrpages(sg->offset, sg->length);
3053 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
3056 sglist->dma_length = 0;
3061 * Check if DMAR supports zero-length reads on write-only mappings
3064 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3065 !cap_zlr(iommu->cap))
3066 prot |= DMA_PTE_READ;
3067 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3068 prot |= DMA_PTE_WRITE;
3070 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
3072 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3073 if (unlikely(ret)) {
3074 /* clear the page */
3075 dma_pte_clear_range(domain, start_vpfn,
3076 start_vpfn + size - 1);
3077 /* free page tables */
3078 dma_pte_free_pagetable(domain, start_vpfn,
3079 start_vpfn + size - 1);
3081 __free_iova(&domain->iovad, iova);
3085 /* it's a non-present to present mapping. Only flush if caching mode */
3086 if (cap_caching_mode(iommu->cap))
3087 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
3089 iommu_flush_write_buffer(iommu);
3094 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3099 struct dma_map_ops intel_dma_ops = {
3100 .alloc_coherent = intel_alloc_coherent,
3101 .free_coherent = intel_free_coherent,
3102 .map_sg = intel_map_sg,
3103 .unmap_sg = intel_unmap_sg,
3104 .map_page = intel_map_page,
3105 .unmap_page = intel_unmap_page,
3106 .mapping_error = intel_mapping_error,
3109 static inline int iommu_domain_cache_init(void)
3113 iommu_domain_cache = kmem_cache_create("iommu_domain",
3114 sizeof(struct dmar_domain),
3119 if (!iommu_domain_cache) {
3120 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3127 static inline int iommu_devinfo_cache_init(void)
3131 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3132 sizeof(struct device_domain_info),
3136 if (!iommu_devinfo_cache) {
3137 printk(KERN_ERR "Couldn't create devinfo cache\n");
3144 static inline int iommu_iova_cache_init(void)
3148 iommu_iova_cache = kmem_cache_create("iommu_iova",
3149 sizeof(struct iova),
3153 if (!iommu_iova_cache) {
3154 printk(KERN_ERR "Couldn't create iova cache\n");
3161 static int __init iommu_init_mempool(void)
3164 ret = iommu_iova_cache_init();
3168 ret = iommu_domain_cache_init();
3172 ret = iommu_devinfo_cache_init();
3176 kmem_cache_destroy(iommu_domain_cache);
3178 kmem_cache_destroy(iommu_iova_cache);
3183 static void __init iommu_exit_mempool(void)
3185 kmem_cache_destroy(iommu_devinfo_cache);
3186 kmem_cache_destroy(iommu_domain_cache);
3187 kmem_cache_destroy(iommu_iova_cache);
3191 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3193 struct dmar_drhd_unit *drhd;
3197 /* We know that this device on this chipset has its own IOMMU.
3198 * If we find it under a different IOMMU, then the BIOS is lying
3199 * to us. Hope that the IOMMU for this device is actually
3200 * disabled, and it needs no translation...
3202 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3204 /* "can't" happen */
3205 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3208 vtbar &= 0xffff0000;
3210 /* we know that this iommu should be at offset 0xa000 from vtbar */
3211 drhd = dmar_find_matched_drhd_unit(pdev);
3212 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3213 TAINT_FIRMWARE_WORKAROUND,
3214 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3215 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3217 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
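/*
 * Illustration only: the quirk above reads the chipset VT-d base address
 * (VTBAR) from config offset 0xb0 of device 00:00.0, masks off the low 16
 * bits, and expects the DRHD unit reported for this device to live at
 * VTBAR + 0xa000. A hypothetical standalone form of that consistency
 * check (the helper name is made up for illustration):
 */
static __maybe_unused bool ioat_snb_vtd_unit_ok(u32 vtbar, u64 drhd_reg_base)
{
	vtbar &= 0xffff0000;
	return drhd_reg_base - vtbar == 0xa000;
}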
3219 static void __init init_no_remapping_devices(void)
3221 struct dmar_drhd_unit *drhd;
3223 for_each_drhd_unit(drhd) {
3224 if (!drhd->include_all) {
3226 for (i = 0; i < drhd->devices_cnt; i++)
3227 if (drhd->devices[i] != NULL)
3229 /* ignore DMAR unit if no pci devices exist */
3230 if (i == drhd->devices_cnt)
3235 for_each_drhd_unit(drhd) {
3237 if (drhd->ignored || drhd->include_all)
3240 for (i = 0; i < drhd->devices_cnt; i++)
3241 if (drhd->devices[i] &&
3242 !IS_GFX_DEVICE(drhd->devices[i]))
3245 if (i < drhd->devices_cnt)
3248 /* This IOMMU has *only* gfx devices. Either bypass it or
3249 set the gfx_mapped flag, as appropriate */
3251 intel_iommu_gfx_mapped = 1;
3254 for (i = 0; i < drhd->devices_cnt; i++) {
3255 if (!drhd->devices[i])
3257 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3263 #ifdef CONFIG_SUSPEND
3264 static int init_iommu_hw(void)
3266 struct dmar_drhd_unit *drhd;
3267 struct intel_iommu *iommu = NULL;
3269 for_each_active_iommu(iommu, drhd)
3271 dmar_reenable_qi(iommu);
3273 for_each_iommu(iommu, drhd) {
3274 if (drhd->ignored) {
3276 * we always have to disable PMRs or DMA may fail on this device
3280 iommu_disable_protect_mem_regions(iommu);
3284 iommu_flush_write_buffer(iommu);
3286 iommu_set_root_entry(iommu);
3288 iommu->flush.flush_context(iommu, 0, 0, 0,
3289 DMA_CCMD_GLOBAL_INVL);
3290 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3291 DMA_TLB_GLOBAL_FLUSH);
3292 if (iommu_enable_translation(iommu))
3294 iommu_disable_protect_mem_regions(iommu);
3300 static void iommu_flush_all(void)
3302 struct dmar_drhd_unit *drhd;
3303 struct intel_iommu *iommu;
3305 for_each_active_iommu(iommu, drhd) {
3306 iommu->flush.flush_context(iommu, 0, 0, 0,
3307 DMA_CCMD_GLOBAL_INVL);
3308 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3309 DMA_TLB_GLOBAL_FLUSH);
3313 static int iommu_suspend(void)
3315 struct dmar_drhd_unit *drhd;
3316 struct intel_iommu *iommu = NULL;
3319 for_each_active_iommu(iommu, drhd) {
3320 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3322 if (!iommu->iommu_state)
3328 for_each_active_iommu(iommu, drhd) {
3329 iommu_disable_translation(iommu);
3331 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3333 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3334 readl(iommu->reg + DMAR_FECTL_REG);
3335 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3336 readl(iommu->reg + DMAR_FEDATA_REG);
3337 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3338 readl(iommu->reg + DMAR_FEADDR_REG);
3339 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3340 readl(iommu->reg + DMAR_FEUADDR_REG);
3342 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3347 for_each_active_iommu(iommu, drhd)
3348 kfree(iommu->iommu_state);
3353 static void iommu_resume(void)
3355 struct dmar_drhd_unit *drhd;
3356 struct intel_iommu *iommu = NULL;
3359 if (init_iommu_hw()) {
3361 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3363 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3367 for_each_active_iommu(iommu, drhd) {
3369 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3371 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3372 iommu->reg + DMAR_FECTL_REG);
3373 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3374 iommu->reg + DMAR_FEDATA_REG);
3375 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3376 iommu->reg + DMAR_FEADDR_REG);
3377 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3378 iommu->reg + DMAR_FEUADDR_REG);
3380 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3383 for_each_active_iommu(iommu, drhd)
3384 kfree(iommu->iommu_state);
3387 static struct syscore_ops iommu_syscore_ops = {
3388 .resume = iommu_resume,
3389 .suspend = iommu_suspend,
3392 static void __init init_iommu_pm_ops(void)
3394 register_syscore_ops(&iommu_syscore_ops);
3398 static inline void init_iommu_pm_ops(void) {}
3399 #endif /* CONFIG_PM */
3401 LIST_HEAD(dmar_rmrr_units);
3403 static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
3405 list_add(&rmrr->list, &dmar_rmrr_units);
3409 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
3411 struct acpi_dmar_reserved_memory *rmrr;
3412 struct dmar_rmrr_unit *rmrru;
3414 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3418 rmrru->hdr = header;
3419 rmrr = (struct acpi_dmar_reserved_memory *)header;
3420 rmrru->base_address = rmrr->base_address;
3421 rmrru->end_address = rmrr->end_address;
3423 dmar_register_rmrr_unit(rmrru);
3428 rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
3430 struct acpi_dmar_reserved_memory *rmrr;
3433 rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
3434 ret = dmar_parse_dev_scope((void *)(rmrr + 1),
3435 ((void *)rmrr) + rmrr->header.length,
3436 &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
3438 if (ret || (rmrru->devices_cnt == 0)) {
3439 list_del(&rmrru->list);
3445 static LIST_HEAD(dmar_atsr_units);
3447 int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
3449 struct acpi_dmar_atsr *atsr;
3450 struct dmar_atsr_unit *atsru;
3452 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3453 atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
3458 atsru->include_all = atsr->flags & 0x1;
3460 list_add(&atsru->list, &dmar_atsr_units);
3465 static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
3468 struct acpi_dmar_atsr *atsr;
3470 if (atsru->include_all)
3473 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3474 rc = dmar_parse_dev_scope((void *)(atsr + 1),
3475 (void *)atsr + atsr->header.length,
3476 &atsru->devices_cnt, &atsru->devices,
3478 if (rc || !atsru->devices_cnt) {
3479 list_del(&atsru->list);
3486 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3489 struct pci_bus *bus;
3490 struct acpi_dmar_atsr *atsr;
3491 struct dmar_atsr_unit *atsru;
3493 dev = pci_physfn(dev);
3495 list_for_each_entry(atsru, &dmar_atsr_units, list) {
3496 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3497 if (atsr->segment == pci_domain_nr(dev->bus))
3504 for (bus = dev->bus; bus; bus = bus->parent) {
3505 struct pci_dev *bridge = bus->self;
3507 if (!bridge || !pci_is_pcie(bridge) ||
3508 bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
3511 if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
3512 for (i = 0; i < atsru->devices_cnt; i++)
3513 if (atsru->devices[i] == bridge)
3519 if (atsru->include_all)
3525 int __init dmar_parse_rmrr_atsr_dev(void)
3527 struct dmar_rmrr_unit *rmrr, *rmrr_n;
3528 struct dmar_atsr_unit *atsr, *atsr_n;
3531 list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
3532 ret = rmrr_parse_dev(rmrr);
3537 list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
3538 ret = atsr_parse_dev(atsr);
3547 * Here we only respond to the action of a device being unbound from its driver.
3549 * A newly added device is not attached to its DMAR domain here yet; that will
3550 * happen when the device is mapped to an iova.
3552 static int device_notifier(struct notifier_block *nb,
3553 unsigned long action, void *data)
3555 struct device *dev = data;
3556 struct pci_dev *pdev = to_pci_dev(dev);
3557 struct dmar_domain *domain;
3559 if (iommu_no_mapping(dev))
3562 domain = find_domain(pdev);
3566 if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
3567 domain_remove_one_dev_info(domain, pdev);
3569 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3570 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
3571 list_empty(&domain->devices))
3572 domain_exit(domain);
3578 static struct notifier_block device_nb = {
3579 .notifier_call = device_notifier,
3582 int __init intel_iommu_init(void)
3586 /* VT-d is required for a TXT/tboot launch, so enforce that */
3587 force_on = tboot_force_iommu();
3589 if (dmar_table_init()) {
3591 panic("tboot: Failed to initialize DMAR table\n");
3595 if (dmar_dev_scope_init() < 0) {
3597 panic("tboot: Failed to initialize DMAR device scope\n");
3601 if (no_iommu || dmar_disabled)
3604 if (iommu_init_mempool()) {
3606 panic("tboot: Failed to initialize iommu memory\n");
3610 if (list_empty(&dmar_rmrr_units))
3611 printk(KERN_INFO "DMAR: No RMRR found\n");
3613 if (list_empty(&dmar_atsr_units))
3614 printk(KERN_INFO "DMAR: No ATSR found\n");
3616 if (dmar_init_reserved_ranges()) {
3618 panic("tboot: Failed to reserve iommu ranges\n");
3622 init_no_remapping_devices();
3627 panic("tboot: Failed to initialize DMARs\n");
3628 printk(KERN_ERR "IOMMU: dmar init failed\n");
3629 put_iova_domain(&reserved_iova_list);
3630 iommu_exit_mempool();
3634 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3636 init_timer(&unmap_timer);
3637 #ifdef CONFIG_SWIOTLB
3640 dma_ops = &intel_dma_ops;
3642 init_iommu_pm_ops();
3644 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
3646 bus_register_notifier(&pci_bus_type, &device_nb);
3648 intel_iommu_enabled = 1;
3653 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3654 struct pci_dev *pdev)
3656 struct pci_dev *tmp, *parent;
3658 if (!iommu || !pdev)
3661 /* dependent device detach */
3662 tmp = pci_find_upstream_pcie_bridge(pdev);
3663 /* Secondary interface's bus number and devfn 0 */
3665 parent = pdev->bus->self;
3666 while (parent != tmp) {
3667 iommu_detach_dev(iommu, parent->bus->number,
3669 parent = parent->bus->self;
3671 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
3672 iommu_detach_dev(iommu,
3673 tmp->subordinate->number, 0);
3674 else /* this is a legacy PCI bridge */
3675 iommu_detach_dev(iommu, tmp->bus->number,
3680 static void domain_remove_one_dev_info(struct dmar_domain *domain,
3681 struct pci_dev *pdev)
3683 struct device_domain_info *info;
3684 struct intel_iommu *iommu;
3685 unsigned long flags;
3687 struct list_head *entry, *tmp;
3689 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3694 spin_lock_irqsave(&device_domain_lock, flags);
3695 list_for_each_safe(entry, tmp, &domain->devices) {
3696 info = list_entry(entry, struct device_domain_info, link);
3697 if (info->segment == pci_domain_nr(pdev->bus) &&
3698 info->bus == pdev->bus->number &&
3699 info->devfn == pdev->devfn) {
3700 list_del(&info->link);
3701 list_del(&info->global);
3703 info->dev->dev.archdata.iommu = NULL;
3704 spin_unlock_irqrestore(&device_domain_lock, flags);
3706 iommu_disable_dev_iotlb(info);
3707 iommu_detach_dev(iommu, info->bus, info->devfn);
3708 iommu_detach_dependent_devices(iommu, pdev);
3709 free_devinfo_mem(info);
3711 spin_lock_irqsave(&device_domain_lock, flags);
3719 /* if there are no other devices under the same iommu
3720 * owned by this domain, clear this iommu in iommu_bmp and
3721 * update the iommu count and coherency
3723 if (iommu == device_to_iommu(info->segment, info->bus,
3728 spin_unlock_irqrestore(&device_domain_lock, flags);
3731 unsigned long tmp_flags;
3732 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
3733 clear_bit(iommu->seq_id, &domain->iommu_bmp);
3734 domain->iommu_count--;
3735 domain_update_iommu_cap(domain);
3736 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
3738 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3739 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
3740 spin_lock_irqsave(&iommu->lock, tmp_flags);
3741 clear_bit(domain->id, iommu->domain_ids);
3742 iommu->domains[domain->id] = NULL;
3743 spin_unlock_irqrestore(&iommu->lock, tmp_flags);
3748 static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3750 struct device_domain_info *info;
3751 struct intel_iommu *iommu;
3752 unsigned long flags1, flags2;
3754 spin_lock_irqsave(&device_domain_lock, flags1);
3755 while (!list_empty(&domain->devices)) {
3756 info = list_entry(domain->devices.next,
3757 struct device_domain_info, link);
3758 list_del(&info->link);
3759 list_del(&info->global);
3761 info->dev->dev.archdata.iommu = NULL;
3763 spin_unlock_irqrestore(&device_domain_lock, flags1);
3765 iommu_disable_dev_iotlb(info);
3766 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
3767 iommu_detach_dev(iommu, info->bus, info->devfn);
3768 iommu_detach_dependent_devices(iommu, info->dev);
3770 /* clear this iommu in iommu_bmp, update iommu count
3773 spin_lock_irqsave(&domain->iommu_lock, flags2);
3774 if (test_and_clear_bit(iommu->seq_id,
3775 &domain->iommu_bmp)) {
3776 domain->iommu_count--;
3777 domain_update_iommu_cap(domain);
3779 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3781 free_devinfo_mem(info);
3782 spin_lock_irqsave(&device_domain_lock, flags1);
3784 spin_unlock_irqrestore(&device_domain_lock, flags1);
3788 /* domain ids for virtual machines; they won't be set in context entries */
3788 static unsigned long vm_domid;
3790 static struct dmar_domain *iommu_alloc_vm_domain(void)
3792 struct dmar_domain *domain;
3794 domain = alloc_domain_mem();
3798 domain->id = vm_domid++;
3800 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
3801 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3806 static int md_domain_init(struct dmar_domain *domain, int guest_width)
3810 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
3811 spin_lock_init(&domain->iommu_lock);
3813 domain_reserve_special_ranges(domain);
3815 /* calculate AGAW */
3816 domain->gaw = guest_width;
3817 adjust_width = guestwidth_to_adjustwidth(guest_width);
3818 domain->agaw = width_to_agaw(adjust_width);
3820 INIT_LIST_HEAD(&domain->devices);
3822 domain->iommu_count = 0;
3823 domain->iommu_coherency = 0;
3824 domain->iommu_snooping = 0;
3825 domain->iommu_superpage = 0;
3826 domain->max_addr = 0;
3829 /* always allocate the top pgd */
3830 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
3833 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3837 static void iommu_free_vm_domain(struct dmar_domain *domain)
3839 unsigned long flags;
3840 struct dmar_drhd_unit *drhd;
3841 struct intel_iommu *iommu;
3843 unsigned long ndomains;
3845 for_each_drhd_unit(drhd) {
3848 iommu = drhd->iommu;
3850 ndomains = cap_ndoms(iommu->cap);
3851 for_each_set_bit(i, iommu->domain_ids, ndomains) {
3852 if (iommu->domains[i] == domain) {
3853 spin_lock_irqsave(&iommu->lock, flags);
3854 clear_bit(i, iommu->domain_ids);
3855 iommu->domains[i] = NULL;
3856 spin_unlock_irqrestore(&iommu->lock, flags);
3863 static void vm_domain_exit(struct dmar_domain *domain)
3865 /* Domain 0 is reserved, so don't process it */
3869 vm_domain_remove_all_dev_info(domain);
3871 put_iova_domain(&domain->iovad);
3874 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
3876 /* free page tables */
3877 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
3879 iommu_free_vm_domain(domain);
3880 free_domain_mem(domain);
3883 static int intel_iommu_domain_init(struct iommu_domain *domain)
3885 struct dmar_domain *dmar_domain;
3887 dmar_domain = iommu_alloc_vm_domain();
3890 "intel_iommu_domain_init: dmar_domain == NULL\n");
3893 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
3895 "intel_iommu_domain_init() failed\n");
3896 vm_domain_exit(dmar_domain);
3899 domain_update_iommu_cap(dmar_domain);
3900 domain->priv = dmar_domain;
3905 static void intel_iommu_domain_destroy(struct iommu_domain *domain)
3907 struct dmar_domain *dmar_domain = domain->priv;
3909 domain->priv = NULL;
3910 vm_domain_exit(dmar_domain);
3913 static int intel_iommu_attach_device(struct iommu_domain *domain,
3916 struct dmar_domain *dmar_domain = domain->priv;
3917 struct pci_dev *pdev = to_pci_dev(dev);
3918 struct intel_iommu *iommu;
3921 /* normally pdev is not mapped */
3922 if (unlikely(domain_context_mapped(pdev))) {
3923 struct dmar_domain *old_domain;
3925 old_domain = find_domain(pdev);
3927 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
3928 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
3929 domain_remove_one_dev_info(old_domain, pdev);
3931 domain_remove_dev_info(old_domain);
3935 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3940 /* check if this iommu agaw is sufficient for max mapped address */
3941 addr_width = agaw_to_width(iommu->agaw);
3942 if (addr_width > cap_mgaw(iommu->cap))
3943 addr_width = cap_mgaw(iommu->cap);
3945 if (dmar_domain->max_addr > (1LL << addr_width)) {
3946 printk(KERN_ERR "%s: iommu width (%d) is not "
3947 "sufficient for the mapped address (%llx)\n",
3948 __func__, addr_width, dmar_domain->max_addr);
3951 dmar_domain->gaw = addr_width;
3954 * Knock out extra levels of page tables if necessary
3956 while (iommu->agaw < dmar_domain->agaw) {
3957 struct dma_pte *pte;
3959 pte = dmar_domain->pgd;
3960 if (dma_pte_present(pte)) {
3961 dmar_domain->pgd = (struct dma_pte *)
3962 phys_to_virt(dma_pte_addr(pte));
3963 free_pgtable_page(pte);
3965 dmar_domain->agaw--;
3968 return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
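/*
 * Illustration only, with assumed numbers: a domain that already contains
 * mappings beyond the address width an IOMMU can reach must not be
 * attached to it. For instance, an IOMMU whose effective width is 39 bits
 * can translate addresses up to 1ULL << 39 (512GiB), so a domain with a
 * max_addr of 1ULL << 40 (1TiB) would be rejected by the check above. The
 * helper below is a hypothetical restatement of that check.
 */
static __maybe_unused bool domain_fits_iommu_width(u64 domain_max_addr, int addr_width)
{
	return domain_max_addr <= (1ULL << addr_width);
}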
3971 static void intel_iommu_detach_device(struct iommu_domain *domain,
3974 struct dmar_domain *dmar_domain = domain->priv;
3975 struct pci_dev *pdev = to_pci_dev(dev);
3977 domain_remove_one_dev_info(dmar_domain, pdev);
3980 static int intel_iommu_map(struct iommu_domain *domain,
3981 unsigned long iova, phys_addr_t hpa,
3982 int gfp_order, int iommu_prot)
3984 struct dmar_domain *dmar_domain = domain->priv;
3990 if (iommu_prot & IOMMU_READ)
3991 prot |= DMA_PTE_READ;
3992 if (iommu_prot & IOMMU_WRITE)
3993 prot |= DMA_PTE_WRITE;
3994 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
3995 prot |= DMA_PTE_SNP;
3997 size = PAGE_SIZE << gfp_order;
3998 max_addr = iova + size;
3999 if (dmar_domain->max_addr < max_addr) {
4002 /* check if minimum agaw is sufficient for mapped address */
4003 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
4004 if (end < max_addr) {
4005 printk(KERN_ERR "%s: iommu width (%d) is not "
4006 "sufficient for the mapped address (%llx)\n",
4007 __func__, dmar_domain->gaw, max_addr);
4010 dmar_domain->max_addr = max_addr;
4012 /* Round up size to next multiple of PAGE_SIZE, if it and
4013 the low bits of hpa would take us onto the next page */
4014 size = aligned_nrpages(hpa, size);
4015 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4016 hpa >> VTD_PAGE_SHIFT, size, prot);
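/*
 * Worked example (illustration only, assuming 4KiB pages): the rounding
 * above matters when the mapping straddles a page boundary. A 0x20-byte
 * mapping whose hpa has page offset 0xff0 needs two pages, since
 * aligned_nrpages(0xff0, 0x20) = PAGE_ALIGN(0x1010) >> VTD_PAGE_SHIFT = 2,
 * even though the length alone would fit in one. The helper name below is
 * hypothetical, purely for illustration.
 */
static __maybe_unused unsigned long example_straddling_nrpages(void)
{
	/* 0x20 bytes starting at page offset 0xff0 straddle two 4KiB pages */
	return aligned_nrpages(0xff0, 0x20);	/* == 2 */
}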
4020 static int intel_iommu_unmap(struct iommu_domain *domain,
4021 unsigned long iova, int gfp_order)
4023 struct dmar_domain *dmar_domain = domain->priv;
4024 size_t size = PAGE_SIZE << gfp_order;
4027 order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
4028 (iova + size - 1) >> VTD_PAGE_SHIFT);
4030 if (dmar_domain->max_addr == iova + size)
4031 dmar_domain->max_addr = iova;
4036 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
4039 struct dmar_domain *dmar_domain = domain->priv;
4040 struct dma_pte *pte;
4043 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
4045 phys = dma_pte_addr(pte);
4050 static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
4053 struct dmar_domain *dmar_domain = domain->priv;
4055 if (cap == IOMMU_CAP_CACHE_COHERENCY)
4056 return dmar_domain->iommu_snooping;
4057 if (cap == IOMMU_CAP_INTR_REMAP)
4058 return intr_remapping_enabled;
4063 static struct iommu_ops intel_iommu_ops = {
4064 .domain_init = intel_iommu_domain_init,
4065 .domain_destroy = intel_iommu_domain_destroy,
4066 .attach_dev = intel_iommu_attach_device,
4067 .detach_dev = intel_iommu_detach_device,
4068 .map = intel_iommu_map,
4069 .unmap = intel_iommu_unmap,
4070 .iova_to_phys = intel_iommu_iova_to_phys,
4071 .domain_has_cap = intel_iommu_domain_has_cap,
4074 static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
4077 * Mobile 4 Series Chipset neglects to set RWBF capability, but needs it.
4080 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
4083 /* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */
4084 if (dev->revision == 0x07) {
4085 printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
4090 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
4093 #define GGC_MEMORY_SIZE_MASK (0xf << 8)
4094 #define GGC_MEMORY_SIZE_NONE (0x0 << 8)
4095 #define GGC_MEMORY_SIZE_1M (0x1 << 8)
4096 #define GGC_MEMORY_SIZE_2M (0x3 << 8)
4097 #define GGC_MEMORY_VT_ENABLED (0x8 << 8)
4098 #define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
4099 #define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
4100 #define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
4102 static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
4106 if (pci_read_config_word(dev, GGC, &ggc))
4109 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
4110 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
4112 } else if (dmar_map_gfx) {
4113 /* we have to ensure the gfx device is idle before we flush */
4114 printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
4115 intel_iommu_strict = 1;
4118 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4119 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4120 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4121 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4123 /* On Tylersburg chipsets, some BIOSes have been known to enable the
4124 ISOCH DMAR unit for the Azalia sound device, but not give it any
4125 TLB entries, which causes it to deadlock. Check for that. We do
4126 this in a function called from init_dmars(), instead of in a PCI
4127 quirk, because we don't want to print the obnoxious "BIOS broken"
4128 message if VT-d is actually disabled.
4130 static void __init check_tylersburg_isoch(void)
4132 struct pci_dev *pdev;
4133 uint32_t vtisochctrl;
4135 /* If there's no Azalia in the system anyway, forget it. */
4136 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4141 /* System Management Registers. Might be hidden, in which case
4142 we can't do the sanity check. But that's OK, because the
4143 known-broken BIOSes _don't_ actually hide it, so far. */
4144 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4148 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
4155 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4156 if (vtisochctrl & 1)
4159 /* Drop all bits other than the number of TLB entries */
4160 vtisochctrl &= 0x1c;
4162 /* If we have the recommended number of TLB entries (16), fine. */
4163 if (vtisochctrl == 0x10)
4166 /* Zero TLB entries? You get to ride the short bus to school. */
4168 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
4169 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4170 dmi_get_system_info(DMI_BIOS_VENDOR),
4171 dmi_get_system_info(DMI_BIOS_VERSION),
4172 dmi_get_system_info(DMI_PRODUCT_VERSION));
4173 iommu_identity_mapping |= IDENTMAP_AZALIA;
4177 printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",