/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/amd-iommu.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"
/*
 * definitions for the ACPI scanning code
 */
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE                  0x10
#define ACPI_IVMD_TYPE_ALL              0x20
#define ACPI_IVMD_TYPE                  0x21
#define ACPI_IVMD_TYPE_RANGE            0x22

#define IVHD_DEV_ALL                    0x01
#define IVHD_DEV_SELECT                 0x02
#define IVHD_DEV_SELECT_RANGE_START     0x03
#define IVHD_DEV_RANGE_END              0x04
#define IVHD_DEV_ALIAS                  0x42
#define IVHD_DEV_ALIAS_RANGE            0x43
#define IVHD_DEV_EXT_SELECT             0x46
#define IVHD_DEV_EXT_SELECT_RANGE       0x47

#define IVHD_FLAG_HT_TUN_EN_MASK        0x01
#define IVHD_FLAG_PASSPW_EN_MASK        0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK     0x04
#define IVHD_FLAG_ISOC_EN_MASK          0x08

#define IVMD_FLAG_EXCL_RANGE            0x08
#define IVMD_FLAG_UNITY_MAP             0x01

#define ACPI_DEVFLAG_INITPASS           0x01
#define ACPI_DEVFLAG_EXTINT             0x02
#define ACPI_DEVFLAG_NMI                0x04
#define ACPI_DEVFLAG_SYSMGT1            0x10
#define ACPI_DEVFLAG_SYSMGT2            0x20
#define ACPI_DEVFLAG_LINT0              0x40
#define ACPI_DEVFLAG_LINT1              0x80
#define ACPI_DEVFLAG_ATSDIS             0x10000000
/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entries.
 */
struct ivhd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 cap_ptr;
	u64 mmio_phys;
	u16 pci_seg;
	u16 info;
	u32 reserved;
} __attribute__((packed));
/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
struct ivhd_entry {
	u8 type;
	u16 devid;
	u8 flags;
	u32 ext;
} __attribute__((packed));
/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
struct ivmd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 aux;
	u64 resv;
	u64 range_start;
	u64 range_length;
} __attribute__((packed));
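
/*
 * Note on table layout (a summary derived from the parsers below, not a
 * restatement of the IVRS specification text): every IVHD and IVMD record
 * starts with a type byte and carries its own length field, so the parsing
 * code can walk the table by simply advancing the scan pointer by
 * h->length, m->length or ivhd_entry_length() respectively.
 */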
static int __initdata amd_iommu_detected;
static bool __initdata amd_iommu_disabled;

u16 amd_iommu_last_bdf;			/* largest PCI device id we have
					   to handle */
LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
					   we find in ACPI */
bool amd_iommu_unmap_flush;		/* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
					   system */

/* Array to assign indices to IOMMUs */
struct amd_iommu *amd_iommus[MAX_IOMMUS];
int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;

u32 amd_iommu_max_pasids __read_mostly = ~0;

/*
 * The ACPI table parsing functions set this variable on an error
 */
static int __initdata amd_iommu_init_err;

/*
 * List of protection domains - used during resume
 */
LIST_HEAD(amd_iommu_pd_list);
spinlock_t amd_iommu_pd_lock;
/*
 * Pointer to the device table which is shared by all AMD IOMMUs.
 * It is indexed by the PCI device id or the HT unit id and contains
 * information about the domain the device belongs to as well as the
 * page table root pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;	/* size of the device table */
static u32 alias_table_size;	/* size of the alias table */
static u32 rlookup_table_size;	/* size of the rlookup table */
/*
 * This function flushes all internal caches of
 * the IOMMU used by this driver.
 */
extern void iommu_flush_all_caches(struct amd_iommu *iommu);
static inline void update_last_devid(u16 devid)
{
	if (devid > amd_iommu_last_bdf)
		amd_iommu_last_bdf = devid;
}
static inline unsigned long tbl_size(int entry_size)
{
	unsigned shift = PAGE_SHIFT +
		get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

	return 1UL << shift;
}
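
/*
 * Worked example (illustrative, assuming DEV_TABLE_ENTRY_SIZE is 32 bytes):
 * with amd_iommu_last_bdf = 0xffff there are 0x10000 device ids, so the
 * device table needs 0x10000 * 32 = 2 MiB. get_order() yields 9, shift
 * becomes 12 + 9 = 21, and tbl_size() returns a page-aligned 2 MiB.
 */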
/* Access to l1 and l2 indexed register spaces */

static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
	pci_read_config_dword(iommu->dev, 0xfc, &val);
	return val;
}

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
	pci_write_config_dword(iommu->dev, 0xfc, val);
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf0, address);
	pci_read_config_dword(iommu->dev, 0xf4, &val);
	return val;
}

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
	pci_write_config_dword(iommu->dev, 0xf4, val);
}
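
/*
 * How the indirect access works (derived from the accessors above): PCI
 * config offsets 0xf8/0xfc form an address/data pair for the L1 register
 * space -- the L1 index sits in bits 16+ of the address word and bit 31
 * enables the write -- while 0xf0/0xf4 do the same for the L2 space with
 * bit 8 as the write-enable bit.
 */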
/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required for that driver.
 *
 ****************************************************************************/
/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
	u64 start = iommu->exclusion_start & PAGE_MASK;
	u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
	u64 entry;

	if (!iommu->exclusion_start)
		return;

	entry = start | MMIO_EXCL_ENABLE_MASK;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
			&entry, sizeof(entry));

	entry = limit;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
			&entry, sizeof(entry));
}
/* Programs the physical address of the device table into the IOMMU hardware */
static void __init iommu_set_device_table(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->mmio_base == NULL);

	entry = virt_to_phys(amd_iommu_dev_table);
	entry |= (dev_table_size >> 12) - 1;
	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
			&entry, sizeof(entry));
}
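
/*
 * Size encoding note (illustrative): the low bits of the device table base
 * address register hold the table size in 4 KiB units minus one, hence the
 * (dev_table_size >> 12) - 1 above -- a 2 MiB table is encoded as 511.
 */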
/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= (1 << bit);
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~(1 << bit);
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}
/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
	static const char * const feat_str[] = {
		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
		"IA", "GA", "HE", "PC", NULL
	};
	int i;

	printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx",
	       dev_name(&iommu->dev->dev), iommu->cap_ptr);

	if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
		printk(KERN_CONT " extended features: ");
		for (i = 0; feat_str[i]; ++i)
			if (iommu_feature(iommu, (1ULL << i)))
				printk(KERN_CONT " %s", feat_str[i]);
	}
	printk(KERN_CONT "\n");

	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}
static void iommu_disable(struct amd_iommu *iommu)
{
	/* Disable command buffer */
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	/* Disable event logging and event interrupts */
	iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

	/* Disable IOMMU hardware itself */
	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}
/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 * __init iommu_map_mmio_space(u64 address)
{
	u8 *ret;

	if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) {
		pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n",
			address);
		pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
		return NULL;
	}

	ret = ioremap_nocache(address, MMIO_REGION_LENGTH);
	if (ret != NULL)
		return ret;

	release_mem_region(address, MMIO_REGION_LENGTH);

	return NULL;
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
	if (iommu->mmio_base)
		iounmap(iommu->mmio_base);
	release_mem_region(iommu->mmio_phys, MMIO_REGION_LENGTH);
}
/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Upon this information the size of the shared data
 * structures is determined later.
 *
 ****************************************************************************/
/*
 * This function calculates the length of a given IVHD entry
 */
static inline int ivhd_entry_length(u8 *ivhd)
{
	return 0x04 << (*ivhd >> 6);
}
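
/*
 * Worked example (illustrative): the top two bits of the entry type select
 * the length as 4 << (type >> 6) bytes. IVHD_DEV_SELECT (0x02) is a 4-byte
 * entry, while IVHD_DEV_ALIAS (0x42) and IVHD_DEV_EXT_SELECT (0x46) carry
 * an extra dword and decode to 8 bytes.
 */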
/*
 * This function reads the last device id the IOMMU has to handle from the PCI
 * capability header for this IOMMU
 */
static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr)
{
	u32 cap;

	cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
	update_last_devid(calc_devid(MMIO_GET_BUS(cap), MMIO_GET_LD(cap)));

	return 0;
}
/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function looks if there is a higher device id defined in the ACPI table
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
	u8 *p = (void *)h, *end = (void *)h;
	struct ivhd_entry *dev;

	p += sizeof(*h);
	end += h->length;

	find_last_devid_on_pci(PCI_BUS(h->devid),
			PCI_SLOT(h->devid),
			PCI_FUNC(h->devid),
			h->cap_ptr);

	while (p < end) {
		dev = (struct ivhd_entry *)p;
		switch (dev->type) {
		case IVHD_DEV_SELECT:
		case IVHD_DEV_RANGE_END:
		case IVHD_DEV_ALIAS:
		case IVHD_DEV_EXT_SELECT:
			/* all the above subfield types refer to device ids */
			update_last_devid(dev->devid);
			break;
		default:
			break;
		}
		p += ivhd_entry_length(p);
	}

	return 0;
}
/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table. So we check the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
	int i;
	u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;

	/*
	 * Validate checksum here so we don't need to do it when
	 * we actually parse the table
	 */
	for (i = 0; i < table->length; ++i)
		checksum += p[i];
	if (checksum != 0) {
		/* ACPI table corrupt */
		amd_iommu_init_err = -ENODEV;
		return 0;
	}

	p += IVRS_HEADER_LENGTH;

	end += table->length;
	while (p < end) {
		h = (struct ivhd_header *)p;
		switch (h->type) {
		case ACPI_IVHD_TYPE:
			find_last_devid_from_ivhd(h);
			break;
		default:
			break;
		}
		p += h->length;
	}

	return 0;
}
/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the device/alias/rlookup table and also
 * basically initialize the hardware.
 *
 ****************************************************************************/
/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously
 */
static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
{
	u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
			get_order(CMD_BUFFER_SIZE));

	if (cmd_buf == NULL)
		return NULL;

	iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED;

	return cmd_buf;
}
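
/*
 * Note (derived from the code in this file): cmd_buf_size doubles as a
 * state word -- the CMD_BUFFER_UNINITIALIZED flag stays set until
 * iommu_enable_command_buffer() has programmed the hardware, and both the
 * enable path and free_command_buffer() mask it off before using the size
 * value.
 */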
/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}
/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->cmd_buf == NULL);

	entry = (u64)virt_to_phys(iommu->cmd_buf);
	entry |= MMIO_CMD_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
		    &entry, sizeof(entry));

	amd_iommu_reset_cmd_buffer(iommu);
	iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED);
}
static void __init free_command_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->cmd_buf,
		   get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED)));
}
/* allocates the memory where the IOMMU will log its events to */
static u8 * __init alloc_event_buffer(struct amd_iommu *iommu)
{
	iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
			get_order(EVT_BUFFER_SIZE));

	if (iommu->evt_buf == NULL)
		return NULL;

	iommu->evt_buf_size = EVT_BUFFER_SIZE;

	return iommu->evt_buf;
}
static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->evt_buf == NULL);

	entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}
static void __init free_event_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}
/* allocates the memory where the IOMMU will log peripheral page requests to */
static u8 * __init alloc_ppr_log(struct amd_iommu *iommu)
{
	iommu->ppr_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
			get_order(PPR_LOG_SIZE));

	if (iommu->ppr_log == NULL)
		return NULL;

	return iommu->ppr_log;
}
static void iommu_enable_ppr_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (iommu->ppr_log == NULL)
		return;

	entry = (u64)virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
	iommu_feature_enable(iommu, CONTROL_PPR_EN);
}
static void __init free_ppr_log(struct amd_iommu *iommu)
{
	if (iommu->ppr_log == NULL)
		return;

	free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
}
static void iommu_enable_gt(struct amd_iommu *iommu)
{
	if (!iommu_feature(iommu, FEATURE_GT))
		return;

	iommu_feature_enable(iommu, CONTROL_GT_EN);
}
/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
}

static int get_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}
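
/*
 * Worked example (illustrative, assuming DEV_ENTRY_NMI_PASS is 0xba as
 * defined in amd_iommu_types.h): bit >> 6 selects u64 word 2 of the device
 * table entry and bit & 0x3f selects bit 58 within it, i.e. absolute DTE
 * bit 2 * 64 + 58 = 186.
 */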
void amd_iommu_apply_erratum_63(u16 devid)
{
	int sysmgt;

	sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
		 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

	if (sysmgt == 0x01)
		set_dev_entry_bit(devid, DEV_ENTRY_IW);
}
/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
	amd_iommu_rlookup_table[devid] = iommu;
}
/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
					   u16 devid, u32 flags, u32 ext_flags)
{
	if (flags & ACPI_DEVFLAG_INITPASS)
		set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
	if (flags & ACPI_DEVFLAG_EXTINT)
		set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
	if (flags & ACPI_DEVFLAG_NMI)
		set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
	if (flags & ACPI_DEVFLAG_SYSMGT1)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
	if (flags & ACPI_DEVFLAG_SYSMGT2)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
	if (flags & ACPI_DEVFLAG_LINT0)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
	if (flags & ACPI_DEVFLAG_LINT1)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

	amd_iommu_apply_erratum_63(devid);

	set_iommu_for_device(iommu, devid);
}
/*
 * Reads the device exclusion range from ACPI and initializes the IOMMU with
 * it
 */
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

	if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
		return;

	if (iommu) {
		/*
		 * We only can configure exclusion ranges per IOMMU, not
		 * per device. But we can enable the exclusion range per
		 * device. This is done here
		 */
		set_dev_entry_bit(m->devid, DEV_ENTRY_EX);
		iommu->exclusion_start = m->range_start;
		iommu->exclusion_length = m->range_length;
	}
}
/*
 * This function reads some important data from the IOMMU PCI space and
 * initializes the driver data structure with it. It reads the hardware
 * capabilities and the first/last device entries
 */
static void __init init_iommu_from_pci(struct amd_iommu *iommu)
{
	int cap_ptr = iommu->cap_ptr;
	u32 range, misc, low, high;
	int i, j;

	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
			      &iommu->cap);
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
			      &range);
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
			      &misc);

	iommu->first_device = calc_devid(MMIO_GET_BUS(range),
					 MMIO_GET_FD(range));
	iommu->last_device = calc_devid(MMIO_GET_BUS(range),
					MMIO_GET_LD(range));
	iommu->evt_msi_num = MMIO_MSI_NUM(misc);

	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
		amd_iommu_iotlb_sup = false;

	/* read extended feature bits */
	low  = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
	high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);

	iommu->features = ((u64)high << 32) | low;

	if (iommu_feature(iommu, FEATURE_GT)) {
		u32 pasids;
		u64 shift;

		shift   = iommu->features & FEATURE_PASID_MASK;
		shift >>= FEATURE_PASID_SHIFT;
		pasids  = (1 << shift);

		amd_iommu_max_pasids = min(amd_iommu_max_pasids, pasids);
	}

	if (!is_rd890_iommu(iommu->dev))
		return;

	/*
	 * Some rd890 systems may not be fully reconfigured by the BIOS, so
	 * it's necessary for us to store this information so it can be
	 * reprogrammed on resume
	 */

	pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
			      &iommu->stored_addr_lo);
	pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
			      &iommu->stored_addr_hi);

	/* Low bit locks writes to configuration space */
	iommu->stored_addr_lo &= ~1;

	for (i = 0; i < 6; i++)
		for (j = 0; j < 0x12; j++)
			iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);

	for (i = 0; i < 0x83; i++)
		iommu->stored_l2[i] = iommu_read_l2(iommu, i);
}
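
/*
 * PASID width example (illustrative): the FEATURE_PASID field of the
 * extended feature register holds a width value n and the code above
 * computes 1 << n possible PASIDs; amd_iommu_max_pasids then keeps the
 * minimum across all IOMMUs in the system, so e.g. n = 16 caps the system
 * at 65536 PASIDs.
 */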
/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
					struct ivhd_header *h)
{
	u8 *p = (u8 *)h;
	u8 *end = p, flags = 0;
	u16 devid = 0, devid_start = 0, devid_to = 0;
	u32 dev_i, ext_flags = 0;
	bool alias = false;
	struct ivhd_entry *e;

	/*
	 * First save the recommended feature enable bits from ACPI
	 */
	iommu->acpi_flags = h->flags;

	/*
	 * Done. Now parse the device entries
	 */
	p += sizeof(struct ivhd_header);
	end += h->length;

	while (p < end) {
		e = (struct ivhd_entry *)p;
		switch (e->type) {
		case IVHD_DEV_ALL:

			DUMP_printk("  DEV_ALL\t\t\t first devid: %02x:%02x.%x"
				    " last device %02x:%02x.%x flags: %02x\n",
				    PCI_BUS(iommu->first_device),
				    PCI_SLOT(iommu->first_device),
				    PCI_FUNC(iommu->first_device),
				    PCI_BUS(iommu->last_device),
				    PCI_SLOT(iommu->last_device),
				    PCI_FUNC(iommu->last_device),
				    e->flags);

			for (dev_i = iommu->first_device;
					dev_i <= iommu->last_device; ++dev_i)
				set_dev_entry_from_acpi(iommu, dev_i,
							e->flags, 0);
			break;
		case IVHD_DEV_SELECT:

			DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x\n",
				    PCI_BUS(e->devid), PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid), e->flags);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			break;
		case IVHD_DEV_SELECT_RANGE_START:

			DUMP_printk("  DEV_SELECT_RANGE_START\t "
				    "devid: %02x:%02x.%x flags: %02x\n",
				    PCI_BUS(e->devid), PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid), e->flags);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = 0;
			alias = false;
			break;
		case IVHD_DEV_ALIAS:

			DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x devid_to: %02x:%02x.%x\n",
				    PCI_BUS(e->devid), PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid), e->flags,
				    PCI_BUS(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid = e->devid;
			devid_to = e->ext >> 8;
			set_dev_entry_from_acpi(iommu, devid   , e->flags, 0);
			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
			amd_iommu_alias_table[devid] = devid_to;
			break;
		case IVHD_DEV_ALIAS_RANGE:

			DUMP_printk("  DEV_ALIAS_RANGE\t\t "
				    "devid: %02x:%02x.%x flags: %02x "
				    "devid_to: %02x:%02x.%x\n",
				    PCI_BUS(e->devid), PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid), e->flags,
				    PCI_BUS(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid_start = e->devid;
			flags = e->flags;
			devid_to = e->ext >> 8;
			ext_flags = 0;
			alias = true;
			break;
		case IVHD_DEV_EXT_SELECT:

			DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
				    "flags: %02x ext: %08x\n",
				    PCI_BUS(e->devid), PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid), e->flags, e->ext);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags,
						e->ext);
			break;
		case IVHD_DEV_EXT_SELECT_RANGE:

			DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
				    "%02x:%02x.%x flags: %02x ext: %08x\n",
				    PCI_BUS(e->devid), PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid), e->flags, e->ext);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = e->ext;
			alias = false;
			break;
		case IVHD_DEV_RANGE_END:

			DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
				    PCI_BUS(e->devid), PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid));

			devid = e->devid;
			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
				if (alias) {
					amd_iommu_alias_table[dev_i] = devid_to;
					set_dev_entry_from_acpi(iommu,
						devid_to, flags, ext_flags);
				}
				set_dev_entry_from_acpi(iommu, dev_i,
							flags, ext_flags);
			}
			break;
		default:
			break;
		}

		p += ivhd_entry_length(p);
	}
}
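
/*
 * Range handling summary (derived from the switch above): the *_RANGE_START
 * and DEV_ALIAS_RANGE entries only stash devid_start, flags, ext_flags and,
 * for aliases, devid_to; nothing is written to the device table until the
 * matching IVHD_DEV_RANGE_END entry arrives and the loop above replays
 * set_dev_entry_from_acpi() across the whole [devid_start, devid] range.
 */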
/* Initializes the device->iommu mapping for the driver */
static int __init init_iommu_devices(struct amd_iommu *iommu)
{
	u32 i;

	for (i = iommu->first_device; i <= iommu->last_device; ++i)
		set_iommu_for_device(iommu, i);

	return 0;
}
static void __init free_iommu_one(struct amd_iommu *iommu)
{
	free_command_buffer(iommu);
	free_event_buffer(iommu);
	free_ppr_log(iommu);
	iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
	struct amd_iommu *iommu, *next;

	for_each_iommu_safe(iommu, next) {
		list_del(&iommu->list);
		free_iommu_one(iommu);
		kfree(iommu);
	}
}
/*
 * This function glues the initialization function for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
	spin_lock_init(&iommu->lock);

	/* Add IOMMU to internal data structures */
	list_add_tail(&iommu->list, &amd_iommu_list);
	iommu->index = amd_iommus_present++;

	if (unlikely(iommu->index >= MAX_IOMMUS)) {
		WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n");
		return -ENOSYS;
	}

	/* Index is fine - add IOMMU to the array */
	amd_iommus[iommu->index] = iommu;

	/*
	 * Copy data from ACPI table entry to the iommu struct
	 */
	iommu->dev = pci_get_bus_and_slot(PCI_BUS(h->devid), h->devid & 0xff);
	if (!iommu->dev)
		return 1;

	iommu->cap_ptr = h->cap_ptr;
	iommu->pci_seg = h->pci_seg;
	iommu->mmio_phys = h->mmio_phys;
	iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys);
	if (!iommu->mmio_base)
		return -ENOMEM;

	iommu->cmd_buf = alloc_command_buffer(iommu);
	if (!iommu->cmd_buf)
		return -ENOMEM;

	iommu->evt_buf = alloc_event_buffer(iommu);
	if (!iommu->evt_buf)
		return -ENOMEM;

	iommu->int_enabled = false;

	init_iommu_from_pci(iommu);
	init_iommu_from_acpi(iommu, h);
	init_iommu_devices(iommu);

	if (iommu_feature(iommu, FEATURE_PPR)) {
		iommu->ppr_log = alloc_ppr_log(iommu);
		if (!iommu->ppr_log)
			return -ENOMEM;
	}

	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
		amd_iommu_np_cache = true;

	return pci_enable_device(iommu->dev);
}
/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;
	struct amd_iommu *iommu;
	int ret;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		h = (struct ivhd_header *)p;
		switch (h->type) {
		case ACPI_IVHD_TYPE:

			DUMP_printk("device: %02x:%02x.%01x cap: %04x "
				    "seg: %d flags: %01x info %04x\n",
				    PCI_BUS(h->devid), PCI_SLOT(h->devid),
				    PCI_FUNC(h->devid), h->cap_ptr,
				    h->pci_seg, h->flags, h->info);
			DUMP_printk("       mmio-addr: %016llx\n",
				    h->mmio_phys);

			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
			if (iommu == NULL) {
				amd_iommu_init_err = -ENOMEM;
				return 0;
			}

			ret = init_iommu_one(iommu, h);
			if (ret) {
				amd_iommu_init_err = ret;
				return 0;
			}
			break;
		default:
			break;
		}
		p += h->length;
	}

	return 0;
}
/****************************************************************************
 *
 * The following functions initialize the MSI interrupts for all IOMMUs
 * in the system. It's a bit challenging because there could be multiple
 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
 * IOMMU.
 *
 ****************************************************************************/
static int iommu_setup_msi(struct amd_iommu *iommu)
{
	int r;

	if (pci_enable_msi(iommu->dev))
		return 1;

	r = request_threaded_irq(iommu->dev->irq,
				 amd_iommu_int_handler,
				 amd_iommu_int_thread,
				 0, "AMD-Vi",
				 iommu->dev);

	if (r) {
		pci_disable_msi(iommu->dev);
		return 1;
	}

	iommu->int_enabled = true;
	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

	if (iommu->ppr_log != NULL)
		iommu_feature_enable(iommu, CONTROL_PPFINT_EN);

	return 0;
}

static int iommu_init_msi(struct amd_iommu *iommu)
{
	if (iommu->int_enabled)
		return 0;

	if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
		return iommu_setup_msi(iommu);

	return 1;
}
/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/
static void __init free_unity_maps(void)
{
	struct unity_map_entry *entry, *next;

	list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}
/* called when we find an exclusion range definition in ACPI */
static int __init init_exclusion_range(struct ivmd_header *m)
{
	int i;

	switch (m->type) {
	case ACPI_IVMD_TYPE:
		set_device_exclusion_range(m->devid, m);
		break;
	case ACPI_IVMD_TYPE_ALL:
		for (i = 0; i <= amd_iommu_last_bdf; ++i)
			set_device_exclusion_range(i, m);
		break;
	case ACPI_IVMD_TYPE_RANGE:
		for (i = m->devid; i <= m->aux; ++i)
			set_device_exclusion_range(i, m);
		break;
	default:
		break;
	}

	return 0;
}
/* called for unity map ACPI definition */
static int __init init_unity_map_range(struct ivmd_header *m)
{
	struct unity_map_entry *e = NULL;
	char *s;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	switch (m->type) {
	default:
		kfree(e);
		return 0;
	case ACPI_IVMD_TYPE:
		s = "IVMD_TYPE\t\t\t";
		e->devid_start = e->devid_end = m->devid;
		break;
	case ACPI_IVMD_TYPE_ALL:
		s = "IVMD_TYPE_ALL\t\t";
		e->devid_start = 0;
		e->devid_end = amd_iommu_last_bdf;
		break;
	case ACPI_IVMD_TYPE_RANGE:
		s = "IVMD_TYPE_RANGE\t\t";
		e->devid_start = m->devid;
		e->devid_end = m->aux;
		break;
	}
	e->address_start = PAGE_ALIGN(m->range_start);
	e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
	e->prot = m->flags >> 1;

	DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
		    " range_start: %016llx range_end: %016llx flags: %x\n", s,
		    PCI_BUS(e->devid_start), PCI_SLOT(e->devid_start),
		    PCI_FUNC(e->devid_start), PCI_BUS(e->devid_end),
		    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
		    e->address_start, e->address_end, m->flags);

	list_add_tail(&e->list, &amd_iommu_unity_map);

	return 0;
}
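
/*
 * Note on the prot computation (assuming the usual IVMD flag layout where
 * bit 0 marks a unity range and bits 1/2 are the read/write permission
 * bits): m->flags >> 1 strips the unity bit so that e->prot carries just
 * the IOMMU_PROT_IR/IOMMU_PROT_IW style permission bits.
 */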
/* iterates over all memory definitions we find in the ACPI table */
static int __init init_memory_definitions(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivmd_header *m;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		m = (struct ivmd_header *)p;
		if (m->flags & IVMD_FLAG_EXCL_RANGE)
			init_exclusion_range(m);
		else if (m->flags & IVMD_FLAG_UNITY_MAP)
			init_unity_map_range(m);

		p += m->length;
	}

	return 0;
}
/*
 * Init the device table to not allow DMA access for devices and
 * suppress all page faults
 */
static void init_device_table(void)
{
	u32 devid;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		set_dev_entry_bit(devid, DEV_ENTRY_VALID);
		set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
	}
}
static void iommu_init_flags(struct amd_iommu *iommu)
{
	iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

	iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
		iommu_feature_disable(iommu, CONTROL_ISOC_EN);

	/*
	 * make IOMMU memory accesses cache coherent
	 */
	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
}
static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
{
	int i, j;
	u32 ioc_feature_control;
	struct pci_dev *pdev = NULL;

	/* RD890 BIOSes may not have completely reconfigured the iommu */
	if (!is_rd890_iommu(iommu->dev))
		return;

	/*
	 * First, we need to ensure that the iommu is enabled. This is
	 * controlled by a register in the northbridge
	 */
	pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0));

	if (!pdev)
		return;

	/* Select Northbridge indirect register 0x75 and enable writing */
	pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
	pci_read_config_dword(pdev, 0x64, &ioc_feature_control);

	/* Enable the iommu */
	if (!(ioc_feature_control & 0x1))
		pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);

	pci_dev_put(pdev);

	/* Restore the iommu BAR */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo);
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
			       iommu->stored_addr_hi);

	/* Restore the l1 indirect regs for each of the 6 l1s */
	for (i = 0; i < 6; i++)
		for (j = 0; j < 0x12; j++)
			iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);

	/* Restore the l2 indirect regs */
	for (i = 0; i < 0x83; i++)
		iommu_write_l2(iommu, i, iommu->stored_l2[i]);

	/* Lock PCI setup registers */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo | 1);
}
/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized
 */
static void enable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_disable(iommu);
		iommu_init_flags(iommu);
		iommu_set_device_table(iommu);
		iommu_enable_command_buffer(iommu);
		iommu_enable_event_buffer(iommu);
		iommu_enable_ppr_log(iommu);
		iommu_enable_gt(iommu);
		iommu_set_exclusion_range(iommu);
		iommu_init_msi(iommu);
		iommu_enable(iommu);
		iommu_flush_all_caches(iommu);
	}
}
static void disable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_disable(iommu);
}
/*
 * Suspend/Resume support
 * disable suspend until real resume implemented
 */

static void amd_iommu_resume(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_apply_resume_quirks(iommu);

	/* re-load the hardware */
	enable_iommus();

	/*
	 * we have to flush after the IOMMUs are enabled because a
	 * disabled IOMMU will never execute the commands we send
	 */
	for_each_iommu(iommu)
		iommu_flush_all_caches(iommu);
}

static int amd_iommu_suspend(void)
{
	/* disable IOMMUs to go out of the way for BIOS */
	disable_iommus();

	return 0;
}

static struct syscore_ops amd_iommu_syscore_ops = {
	.suspend = amd_iommu_suspend,
	.resume = amd_iommu_resume,
};
/*
 * This is the core init function for AMD IOMMU hardware in the system.
 * This function is called from the generic x86 DMA layer initialization
 * code.
 *
 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
 * three times:
 *
 *	1 pass) Find the highest PCI device id the driver has to handle.
 *		Upon this information the size of the data structures is
 *		determined that needs to be allocated.
 *
 *	2 pass) Initialize the data structures just allocated with the
 *		information in the ACPI table about available AMD IOMMUs
 *		in the system. It also maps the PCI devices in the
 *		system to specific IOMMUs
 *
 *	3 pass) After the basic data structures are allocated and
 *		initialized we update them with information about memory
 *		remapping requirements parsed out of the ACPI table in
 *		the last pass.
 *
 * After that the hardware is initialized and ready to go. In the last
 * step we do some Linux specific things like registering the driver in
 * the dma_ops interface and initializing the suspend/resume support
 * functions. Finally it prints some information about AMD IOMMUs and
 * the driver state and enables the hardware.
 */
static int __init amd_iommu_init(void)
{
	int i, ret = 0;

	/*
	 * First parse ACPI tables to find the largest Bus/Dev/Func
	 * we need to handle. Upon this information the shared data
	 * structures for the IOMMUs in the system will be allocated
	 */
	if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
		return -ENODEV;

	ret = amd_iommu_init_err;
	if (ret)
		goto out;

	dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
	alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);

	ret = -ENOMEM;

	/* Device table - directly used by all IOMMUs */
	amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
				      get_order(dev_table_size));
	if (amd_iommu_dev_table == NULL)
		goto out;

	/*
	 * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
	 * IOMMU see for that device
	 */
	amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
			get_order(alias_table_size));
	if (amd_iommu_alias_table == NULL)
		goto free;

	/* IOMMU rlookup table - find the IOMMU for a specific device */
	amd_iommu_rlookup_table = (void *)__get_free_pages(
			GFP_KERNEL | __GFP_ZERO,
			get_order(rlookup_table_size));
	if (amd_iommu_rlookup_table == NULL)
		goto free;

	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
					    GFP_KERNEL | __GFP_ZERO,
					    get_order(MAX_DOMAIN_ID/8));
	if (amd_iommu_pd_alloc_bitmap == NULL)
		goto free;

	/* init the device table */
	init_device_table();

	/*
	 * let all alias entries point to itself
	 */
	for (i = 0; i <= amd_iommu_last_bdf; ++i)
		amd_iommu_alias_table[i] = i;

	/*
	 * never allocate domain 0 because its used as the non-allocated and
	 * error value placeholder
	 */
	amd_iommu_pd_alloc_bitmap[0] = 1;

	spin_lock_init(&amd_iommu_pd_lock);

	/*
	 * now the data structures are allocated and basically initialized
	 * start the real acpi table scan
	 */
	ret = -ENODEV;
	if (acpi_table_parse("IVRS", init_iommu_all) != 0)
		goto free;

	if (amd_iommu_init_err) {
		ret = amd_iommu_init_err;
		goto free;
	}

	if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
		goto free;

	if (amd_iommu_init_err) {
		ret = amd_iommu_init_err;
		goto free;
	}

	ret = amd_iommu_init_devices();
	if (ret)
		goto free;

	enable_iommus();

	if (iommu_pass_through)
		ret = amd_iommu_init_passthrough();
	else
		ret = amd_iommu_init_dma_ops();

	if (ret)
		goto free_disable;

	amd_iommu_init_api();

	amd_iommu_init_notifier();

	register_syscore_ops(&amd_iommu_syscore_ops);

	if (iommu_pass_through)
		goto out;

	if (amd_iommu_unmap_flush)
		printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n");
	else
		printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");

	x86_platform.iommu_shutdown = disable_iommus;
out:
	return ret;

free_disable:
	disable_iommus();

free:
	amd_iommu_uninit_devices();

	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
		   get_order(MAX_DOMAIN_ID/8));

	free_pages((unsigned long)amd_iommu_rlookup_table,
		   get_order(rlookup_table_size));

	free_pages((unsigned long)amd_iommu_alias_table,
		   get_order(alias_table_size));

	free_pages((unsigned long)amd_iommu_dev_table,
		   get_order(dev_table_size));

	free_iommu_all();

	free_unity_maps();

#ifdef CONFIG_GART_IOMMU
	/*
	 * We failed to initialize the AMD IOMMU - try fallback to GART
	 * if possible.
	 */
	gart_iommu_init();
#endif

	goto out;
}
/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It just looks if there is an IVRS ACPI table to detect AMD
 * IOMMUs.
 *
 ****************************************************************************/
static int __init early_amd_iommu_detect(struct acpi_table_header *table)
{
	return 0;
}
int __init amd_iommu_detect(void)
{
	if (no_iommu || (iommu_detected && !gart_iommu_aperture))
		return -ENODEV;

	if (amd_iommu_disabled)
		return -ENODEV;

	if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) {
		iommu_detected = 1;
		amd_iommu_detected = 1;
		x86_init.iommu.iommu_init = amd_iommu_init;

		/* Make sure ACS will be enabled */
		pci_request_acs();
		return 1;
	}
	return -ENODEV;
}
/****************************************************************************
 *
 * Parsing functions for the AMD IOMMU specific kernel command line
 * options.
 *
 ****************************************************************************/
static int __init parse_amd_iommu_dump(char *str)
{
	amd_iommu_dump = true;

	return 1;
}
static int __init parse_amd_iommu_options(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "fullflush", 9) == 0)
			amd_iommu_unmap_flush = true;
		if (strncmp(str, "off", 3) == 0)
			amd_iommu_disabled = true;
	}

	return 1;
}
__setup("amd_iommu_dump", parse_amd_iommu_dump);
__setup("amd_iommu=", parse_amd_iommu_options);

IOMMU_INIT_FINISH(amd_iommu_detect,
		  gart_iommu_hole_init,
		  0,
		  0);
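
/*
 * Registration note (a summary of how iommu_table.h is used here, not an
 * authoritative description of the macro): IOMMU_INIT_FINISH places
 * amd_iommu_detect in the .iommu_table section with gart_iommu_hole_init
 * as its dependency; the real init routine is installed dynamically via
 * x86_init.iommu.iommu_init in amd_iommu_detect(), so the last two
 * arguments stay 0.
 */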