#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/export.h>
+++++ #include <linux/irq.h>
+++++ #include <linux/msi.h>
+++++ #include <asm/irq_remapping.h>
+++++ #include <asm/io_apic.h>
+++++ #include <asm/apic.h>
+++++ #include <asm/hw_irq.h>
#include <asm/msidef.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"
+++++ #include "irq_remapping.h"
#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
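/*
 * Illustrative sketch (not part of the driver): CMD_SET_TYPE packs the
 * command opcode into bits 28-31 of data[1]. The anonymous struct below
 * only mirrors struct iommu_cmd for the example.
 */
#if 0
static void cmd_set_type_example(void)
{
	struct { u32 data[4]; } cmd = { { 0, 0, 0, 0 } };

	CMD_SET_TYPE(&cmd, 0x05);	/* CMD_INV_IRT */
	/* cmd.data[1] == 0x50000000 afterwards */
}
#endif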
static LIST_HEAD(dev_data_list);
static DEFINE_SPINLOCK(dev_data_list_lock);
+++++ LIST_HEAD(ioapic_map);
+++++ LIST_HEAD(hpet_map);
+++++
/*
* Domain for untranslated devices - only allocated
* if iommu=pt passed on kernel cmd line.
u32 data[4];
};
+++++ struct kmem_cache *amd_iommu_irq_cache;
+++++
static void update_domain(struct protection_domain *domain);
static int __init alloc_passthrough_domain(void);
static int iommu_init_device(struct device *dev)
{
----- struct pci_dev *dma_pdev, *pdev = to_pci_dev(dev);
+++++ struct pci_dev *dma_pdev = NULL, *pdev = to_pci_dev(dev);
struct iommu_dev_data *dev_data;
struct iommu_group *group;
u16 alias;
dev_data->alias_data = alias_data;
dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff);
----- } else
+++++ }
+++++
+++++ if (dma_pdev == NULL)
dma_pdev = pci_dev_get(pdev);
+ /* Account for quirked devices */
swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
+ /*
+ * If it's a multifunction device that does not support our
+ * required ACS flags, add to the same group as function 0.
+ */
if (dma_pdev->multifunction &&
!pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
swap_pci_ref(&dma_pdev,
	     pci_get_slot(dma_pdev->bus,
			  PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
				    0)));
+ /*
+ * Devices on the root bus go through the iommu. If that's not us,
+ * find the next upstream device and test ACS up to the root bus.
+ * Finding the next device may require skipping virtual buses.
+ */
while (!pci_is_root_bus(dma_pdev->bus)) {
- if (pci_acs_path_enabled(dma_pdev->bus->self,
- NULL, REQ_ACS_FLAGS))
+ struct pci_bus *bus = dma_pdev->bus;
+
+ while (!bus->self) {
+ if (!pci_is_root_bus(bus))
+ bus = bus->parent;
+ else
+ goto root_bus;
+ }
+
+ if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
break;
- swap_pci_ref(&dma_pdev, pci_dev_get(dma_pdev->bus->self));
+ swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
}
+ root_bus:
group = iommu_group_get(&dma_pdev->dev);
pci_dev_put(dma_pdev);
if (!group) {
/*
* Release iommu->lock because ppr-handling might need to
--- -- * re-aquire it
+++ ++ * re-acquire it
*/
spin_unlock_irqrestore(&iommu->lock, flags);
CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
if (s) /* size bit - we flush more than one 4kb page */
cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
--- -- if (pde) /* PDE bit - we wan't flush everything not only the PTEs */
+++ ++ if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
}
CMD_SET_TYPE(cmd, CMD_INV_ALL);
}
+++++ static void build_inv_irt(struct iommu_cmd *cmd, u16 devid)
+++++ {
+++++ memset(cmd, 0, sizeof(*cmd));
+++++ cmd->data[0] = devid;
+++++ CMD_SET_TYPE(cmd, CMD_INV_IRT);
+++++ }
+++++
/*
* Writes the command to the IOMMU's command buffer and informs the
* hardware about the new command.
iommu_completion_wait(iommu);
}
+++++ static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
+++++ {
+++++ struct iommu_cmd cmd;
+++++
+++++ build_inv_irt(&cmd, devid);
+++++
+++++ iommu_queue_command(iommu, &cmd);
+++++ }
+++++
+++++ static void iommu_flush_irt_all(struct amd_iommu *iommu)
+++++ {
+++++ u32 devid;
+++++
+++++ for (devid = 0; devid <= MAX_DEV_TABLE_ENTRIES; devid++)
+++++ iommu_flush_irt(iommu, devid);
+++++
+++++ iommu_completion_wait(iommu);
+++++ }
+++++
void iommu_flush_all_caches(struct amd_iommu *iommu)
{
if (iommu_feature(iommu, FEATURE_IA)) {
iommu_flush_all(iommu);
} else {
iommu_flush_dte_all(iommu);
+++++ iommu_flush_irt_all(iommu);
iommu_flush_tlb_all(iommu);
}
}
}
/*
--- -- * If a device is not yet associated with a domain, this function does
+++ ++ * If a device is not yet associated with a domain, this function
* assigns it to a domain, making it visible to the hardware
*/
static int attach_device(struct device *dev,
if (domain != NULL)
return domain;
--- -- /* Device not bount yet - bind it */
+++ ++ /* Device not bound yet - bind it */
dma_dom = find_protection_domain(devid);
if (!dma_dom)
dma_dom = amd_iommu_rlookup_table[devid]->default_dom;
alloc_passthrough_domain();
dev_data->passthrough = true;
attach_device(&dev->dev, pt_domain);
--- -- pr_info("AMD-Vi: Using passthough domain for device %s\n",
+++ ++ pr_info("AMD-Vi: Using passthrough domain for device %s\n",
dev_name(&dev->dev));
}
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
return 1;
+++++ case IOMMU_CAP_INTR_REMAP:
+++++ return irq_remapping_enabled;
}
return 0;
return 0;
}
EXPORT_SYMBOL(amd_iommu_device_info);
+++++
+++++ #ifdef CONFIG_IRQ_REMAP
+++++
+++++ /*****************************************************************************
+++++ *
+++++ * Interrupt Remapping Implementation
+++++ *
+++++ *****************************************************************************/
+++++
+++++ union irte {
+++++ u32 val;
+++++ struct {
+++++ u32 valid : 1,
+++++ no_fault : 1,
+++++ int_type : 3,
+++++ rq_eoi : 1,
+++++ dm : 1,
+++++ rsvd_1 : 1,
+++++ destination : 8,
+++++ vector : 8,
+++++ rsvd_2 : 8;
+++++ } fields;
+++++ };
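+++++
+++++ /*
+++++ * Illustrative sketch (not part of the driver): a remapped interrupt
+++++ * with vector 0x41, fixed delivery and destination APIC-ID 2 packs to
+++++ * the raw IRTE value 0x00410201.
+++++ */
+++++ #if 0
+++++ static u32 irte_example(void)
+++++ {
+++++ 	union irte irte;
+++++
+++++ 	irte.val                = 0;
+++++ 	irte.fields.valid       = 1;	/* bit 0 */
+++++ 	irte.fields.int_type    = 0;	/* fixed delivery */
+++++ 	irte.fields.destination = 2;	/* bits 8-15 */
+++++ 	irte.fields.vector      = 0x41;	/* bits 16-23 */
+++++
+++++ 	return irte.val;	/* 0x00410201 */
+++++ }
+++++ #endif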
+++++
+++++ #define DTE_IRQ_PHYS_ADDR_MASK (((1ULL << 45)-1) << 6)
+++++ #define DTE_IRQ_REMAP_INTCTL (2ULL << 60)
+++++ #define DTE_IRQ_TABLE_LEN (8ULL << 1)
+++++ #define DTE_IRQ_REMAP_ENABLE 1ULL
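+++++ /*
+++++ * Note: the table-length field encodes log2(number of IRTEs); the value
+++++ * 8 above means 2^8 = 256 entries, matching MAX_IRQS_PER_TABLE.
+++++ */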
+++++
+++++ static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
+++++ {
+++++ u64 dte;
+++++
+++++ dte = amd_iommu_dev_table[devid].data[2];
+++++ dte &= ~DTE_IRQ_PHYS_ADDR_MASK;
+++++ dte |= virt_to_phys(table->table);
+++++ dte |= DTE_IRQ_REMAP_INTCTL;
+++++ dte |= DTE_IRQ_TABLE_LEN;
+++++ dte |= DTE_IRQ_REMAP_ENABLE;
+++++
+++++ amd_iommu_dev_table[devid].data[2] = dte;
+++++ }
+++++
+++++ #define IRTE_ALLOCATED (~1U)
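+++++ /* Non-zero so a scan sees the slot as used, yet the valid bit stays clear */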
+++++
+++++ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
+++++ {
+++++ struct irq_remap_table *table = NULL;
+++++ struct amd_iommu *iommu;
+++++ unsigned long flags;
+++++ u16 alias;
+++++
+++++ write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+++++
+++++ iommu = amd_iommu_rlookup_table[devid];
+++++ if (!iommu)
+++++ goto out_unlock;
+++++
+++++ table = irq_lookup_table[devid];
+++++ if (table)
+++++ goto out;
+++++
+++++ alias = amd_iommu_alias_table[devid];
+++++ table = irq_lookup_table[alias];
+++++ if (table) {
+++++ irq_lookup_table[devid] = table;
+++++ set_dte_irq_entry(devid, table);
+++++ iommu_flush_dte(iommu, devid);
+++++ goto out;
+++++ }
+++++
+++++ /* Nothing there yet, allocate new irq remapping table */
+++++ table = kzalloc(sizeof(*table), GFP_ATOMIC);
+++++ if (!table)
+++++ goto out;
+++++
+++++ if (ioapic)
+++++ /* Keep the first 32 indexes free for IOAPIC interrupts */
+++++ table->min_index = 32;
+++++
+++++ table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_ATOMIC);
+++++ if (!table->table) {
+++++ kfree(table);
+++++ table = NULL;
+++++ goto out;
+++++ }
+++++
+++++ memset(table->table, 0, MAX_IRQS_PER_TABLE * sizeof(u32));
+++++
+++++ if (ioapic) {
+++++ int i;
+++++
+++++ for (i = 0; i < 32; ++i)
+++++ table->table[i] = IRTE_ALLOCATED;
+++++ }
+++++
+++++ irq_lookup_table[devid] = table;
+++++ set_dte_irq_entry(devid, table);
+++++ iommu_flush_dte(iommu, devid);
+++++ if (devid != alias) {
+++++ irq_lookup_table[alias] = table;
+++++ set_dte_irq_entry(alias, table);
+++++ iommu_flush_dte(iommu, alias);
+++++ }
+++++
+++++ out:
+++++ iommu_completion_wait(iommu);
+++++
+++++ out_unlock:
+++++ write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+++++
+++++ return table;
+++++ }
+++++
+++++ static int alloc_irq_index(struct irq_cfg *cfg, u16 devid, int count)
+++++ {
+++++ struct irq_remap_table *table;
+++++ unsigned long flags;
+++++ int index, c;
+++++
+++++ table = get_irq_table(devid, false);
+++++ if (!table)
+++++ return -ENODEV;
+++++
+++++ spin_lock_irqsave(&table->lock, flags);
+++++
+++++ /* Scan table for free entries */
+++++ for (c = 0, index = table->min_index;
+++++ index < MAX_IRQS_PER_TABLE;
+++++ ++index) {
+++++ if (table->table[index] == 0)
+++++ c += 1;
+++++ else
+++++ c = 0;
+++++
+++++ if (c == count) {
+++++ struct irq_2_iommu *irte_info;
+++++
+++++ for (; c != 0; --c)
+++++ table->table[index - c + 1] = IRTE_ALLOCATED;
+++++
+++++ index -= count - 1;
+++++
+++++ irte_info = &cfg->irq_2_iommu;
+++++ irte_info->sub_handle = devid;
+++++ irte_info->irte_index = index;
+++++ irte_info->iommu = (void *)cfg;
+++++
+++++ goto out;
+++++ }
+++++ }
+++++
+++++ index = -ENOSPC;
+++++
+++++ out:
+++++ spin_unlock_irqrestore(&table->lock, flags);
+++++
+++++ return index;
+++++ }
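+++++
+++++ /*
+++++ * Illustrative sketch (not part of the driver) of the first-fit scan in
+++++ * alloc_irq_index() on a plain array: a run of 'count' free (zero) slots
+++++ * is claimed back-to-front and the index of its first slot is returned.
+++++ */
+++++ #if 0
+++++ static int first_fit_example(u32 *tbl, int size, int count)
+++++ {
+++++ 	int index, c;
+++++
+++++ 	for (c = 0, index = 0; index < size; ++index) {
+++++ 		if (tbl[index] == 0)
+++++ 			c += 1;
+++++ 		else
+++++ 			c = 0;
+++++
+++++ 		if (c == count) {
+++++ 			for (; c != 0; --c)
+++++ 				tbl[index - c + 1] = IRTE_ALLOCATED;
+++++ 			return index - count + 1;
+++++ 		}
+++++ 	}
+++++
+++++ 	return -ENOSPC;
+++++ }
+++++ #endif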
+++++
+++++ static int get_irte(u16 devid, int index, union irte *irte)
+++++ {
+++++ struct irq_remap_table *table;
+++++ unsigned long flags;
+++++
+++++ table = get_irq_table(devid, false);
+++++ if (!table)
+++++ return -ENOMEM;
+++++
+++++ spin_lock_irqsave(&table->lock, flags);
+++++ irte->val = table->table[index];
+++++ spin_unlock_irqrestore(&table->lock, flags);
+++++
+++++ return 0;
+++++ }
+++++
+++++ static int modify_irte(u16 devid, int index, union irte irte)
+++++ {
+++++ struct irq_remap_table *table;
+++++ struct amd_iommu *iommu;
+++++ unsigned long flags;
+++++
+++++ iommu = amd_iommu_rlookup_table[devid];
+++++ if (iommu == NULL)
+++++ return -EINVAL;
+++++
+++++ table = get_irq_table(devid, false);
+++++ if (!table)
+++++ return -ENOMEM;
+++++
+++++ spin_lock_irqsave(&table->lock, flags);
+++++ table->table[index] = irte.val;
+++++ spin_unlock_irqrestore(&table->lock, flags);
+++++
+++++ iommu_flush_irt(iommu, devid);
+++++ iommu_completion_wait(iommu);
+++++
+++++ return 0;
+++++ }
+++++
+++++ static void free_irte(u16 devid, int index)
+++++ {
+++++ struct irq_remap_table *table;
+++++ struct amd_iommu *iommu;
+++++ unsigned long flags;
+++++
+++++ iommu = amd_iommu_rlookup_table[devid];
+++++ if (iommu == NULL)
+++++ return;
+++++
+++++ table = get_irq_table(devid, false);
+++++ if (!table)
+++++ return;
+++++
+++++ spin_lock_irqsave(&table->lock, flags);
+++++ table->table[index] = 0;
+++++ spin_unlock_irqrestore(&table->lock, flags);
+++++
+++++ iommu_flush_irt(iommu, devid);
+++++ iommu_completion_wait(iommu);
+++++ }
+++++
+++++ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
+++++ unsigned int destination, int vector,
+++++ struct io_apic_irq_attr *attr)
+++++ {
+++++ struct irq_remap_table *table;
+++++ struct irq_2_iommu *irte_info;
+++++ struct irq_cfg *cfg;
+++++ union irte irte;
+++++ int ioapic_id;
+++++ int index;
+++++ int devid;
+++++ int ret;
+++++
+++++ cfg = irq_get_chip_data(irq);
+++++ if (!cfg)
+++++ return -EINVAL;
+++++
+++++ irte_info = &cfg->irq_2_iommu;
+++++ ioapic_id = mpc_ioapic_id(attr->ioapic);
+++++ devid = get_ioapic_devid(ioapic_id);
+++++
+++++ if (devid < 0)
+++++ return devid;
+++++
+++++ table = get_irq_table(devid, true);
+++++ if (table == NULL)
+++++ return -ENOMEM;
+++++
+++++ index = attr->ioapic_pin;
+++++
+++++ /* Setup IRQ remapping info */
+++++ irte_info->sub_handle = devid;
+++++ irte_info->irte_index = index;
+++++ irte_info->iommu = (void *)cfg;
+++++
+++++ /* Setup IRTE for IOMMU */
+++++ irte.val = 0;
+++++ irte.fields.vector = vector;
+++++ irte.fields.int_type = apic->irq_delivery_mode;
+++++ irte.fields.destination = destination;
+++++ irte.fields.dm = apic->irq_dest_mode;
+++++ irte.fields.valid = 1;
+++++
+++++ ret = modify_irte(devid, index, irte);
+++++ if (ret)
+++++ return ret;
+++++
+++++ /* Setup IOAPIC entry */
+++++ memset(entry, 0, sizeof(*entry));
+++++
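+++++ /* With remapping enabled, the RTE vector field carries the IRTE index */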
+++++ entry->vector = index;
+++++ entry->mask = 0;
+++++ entry->trigger = attr->trigger;
+++++ entry->polarity = attr->polarity;
+++++
+++++ /*
+++++ * Mask level triggered irqs.
+++++ */
+++++ if (attr->trigger)
+++++ entry->mask = 1;
+++++
+++++ return 0;
+++++ }
+++++
+++++ static int set_affinity(struct irq_data *data, const struct cpumask *mask,
+++++ bool force)
+++++ {
+++++ struct irq_2_iommu *irte_info;
+++++ unsigned int dest, irq;
+++++ struct irq_cfg *cfg;
+++++ union irte irte;
+++++ int err;
+++++
+++++ if (!config_enabled(CONFIG_SMP))
+++++ return -1;
+++++
+++++ cfg = data->chip_data;
+++++ irq = data->irq;
+++++ irte_info = &cfg->irq_2_iommu;
+++++
+++++ if (!cpumask_intersects(mask, cpu_online_mask))
+++++ return -EINVAL;
+++++
+++++ if (get_irte(irte_info->sub_handle, irte_info->irte_index, &irte))
+++++ return -EBUSY;
+++++
+++++ if (assign_irq_vector(irq, cfg, mask))
+++++ return -EBUSY;
+++++
+++++ err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
+++++ if (err) {
+++++ if (assign_irq_vector(irq, cfg, data->affinity))
+++++ pr_err("AMD-Vi: Failed to recover vector for irq %d\n", irq);
+++++ return err;
+++++ }
+++++
+++++ irte.fields.vector = cfg->vector;
+++++ irte.fields.destination = dest;
+++++
+++++ modify_irte(irte_info->sub_handle, irte_info->irte_index, irte);
+++++
+++++ if (cfg->move_in_progress)
+++++ send_cleanup_vector(cfg);
+++++
+++++ cpumask_copy(data->affinity, mask);
+++++
+++++ return 0;
+++++ }
+++++
+++++ static int free_irq(int irq)
+++++ {
+++++ struct irq_2_iommu *irte_info;
+++++ struct irq_cfg *cfg;
+++++
+++++ cfg = irq_get_chip_data(irq);
+++++ if (!cfg)
+++++ return -EINVAL;
+++++
+++++ irte_info = &cfg->irq_2_iommu;
+++++
+++++ free_irte(irte_info->sub_handle, irte_info->irte_index);
+++++
+++++ return 0;
+++++ }
+++++
+++++ static void compose_msi_msg(struct pci_dev *pdev,
+++++ unsigned int irq, unsigned int dest,
+++++ struct msi_msg *msg, u8 hpet_id)
+++++ {
+++++ struct irq_2_iommu *irte_info;
+++++ struct irq_cfg *cfg;
+++++ union irte irte;
+++++
+++++ cfg = irq_get_chip_data(irq);
+++++ if (!cfg)
+++++ return;
+++++
+++++ irte_info = &cfg->irq_2_iommu;
+++++
+++++ irte.val = 0;
+++++ irte.fields.vector = cfg->vector;
+++++ irte.fields.int_type = apic->irq_delivery_mode;
+++++ irte.fields.destination = dest;
+++++ irte.fields.dm = apic->irq_dest_mode;
+++++ irte.fields.valid = 1;
+++++
+++++ modify_irte(irte_info->sub_handle, irte_info->irte_index, irte);
+++++
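+++++ /* The data word carries only the IRTE index; vector/dest live in the IRTE */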
+++++ msg->address_hi = MSI_ADDR_BASE_HI;
+++++ msg->address_lo = MSI_ADDR_BASE_LO;
+++++ msg->data = irte_info->irte_index;
+++++ }
+++++
+++++ static int msi_alloc_irq(struct pci_dev *pdev, int irq, int nvec)
+++++ {
+++++ struct irq_cfg *cfg;
+++++ int index;
+++++ u16 devid;
+++++
+++++ if (!pdev)
+++++ return -EINVAL;
+++++
+++++ cfg = irq_get_chip_data(irq);
+++++ if (!cfg)
+++++ return -EINVAL;
+++++
+++++ devid = get_device_id(&pdev->dev);
+++++ index = alloc_irq_index(cfg, devid, nvec);
+++++
+++++ return index < 0 ? MAX_IRQS_PER_TABLE : index;
+++++ }
+++++
+++++ static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
+++++ int index, int offset)
+++++ {
+++++ struct irq_2_iommu *irte_info;
+++++ struct irq_cfg *cfg;
+++++ u16 devid;
+++++
+++++ if (!pdev)
+++++ return -EINVAL;
+++++
+++++ cfg = irq_get_chip_data(irq);
+++++ if (!cfg)
+++++ return -EINVAL;
+++++
+++++ if (index >= MAX_IRQS_PER_TABLE)
+++++ return 0;
+++++
+++++ devid = get_device_id(&pdev->dev);
+++++ irte_info = &cfg->irq_2_iommu;
+++++
+++++ irte_info->sub_handle = devid;
+++++ irte_info->irte_index = index + offset;
+++++ irte_info->iommu = (void *)cfg;
+++++
+++++ return 0;
+++++ }
+++++
+++++ static int setup_hpet_msi(unsigned int irq, unsigned int id)
+++++ {
+++++ struct irq_2_iommu *irte_info;
+++++ struct irq_cfg *cfg;
+++++ int index, devid;
+++++
+++++ cfg = irq_get_chip_data(irq);
+++++ if (!cfg)
+++++ return -EINVAL;
+++++
+++++ irte_info = &cfg->irq_2_iommu;
+++++ devid = get_hpet_devid(id);
+++++ if (devid < 0)
+++++ return devid;
+++++
+++++ index = alloc_irq_index(cfg, devid, 1);
+++++ if (index < 0)
+++++ return index;
+++++
+++++ irte_info->sub_handle = devid;
+++++ irte_info->irte_index = index;
+++++ irte_info->iommu = (void *)cfg;
+++++
+++++ return 0;
+++++ }
+++++
+++++ struct irq_remap_ops amd_iommu_irq_ops = {
+++++ .supported = amd_iommu_supported,
+++++ .prepare = amd_iommu_prepare,
+++++ .enable = amd_iommu_enable,
+++++ .disable = amd_iommu_disable,
+++++ .reenable = amd_iommu_reenable,
+++++ .enable_faulting = amd_iommu_enable_faulting,
+++++ .setup_ioapic_entry = setup_ioapic_entry,
+++++ .set_affinity = set_affinity,
+++++ .free_irq = free_irq,
+++++ .compose_msi_msg = compose_msi_msg,
+++++ .msi_alloc_irq = msi_alloc_irq,
+++++ .msi_setup_irq = msi_setup_irq,
+++++ .setup_hpet_msi = setup_hpet_msi,
+++++ };
+++++ #endif
#include <linux/msi.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
--- --#include <linux/acpi.h>
#include <acpi/acpi.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
+++++ #include <asm/io_apic.h>
+++++ #include <asm/irq_remapping.h>
#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"
+++++ #include "irq_remapping.h"
/*
* definitions for the ACPI scanning code
#define IVHD_DEV_ALIAS_RANGE 0x43
#define IVHD_DEV_EXT_SELECT 0x46
#define IVHD_DEV_EXT_SELECT_RANGE 0x47
+++++ #define IVHD_DEV_SPECIAL 0x48
+++++
+++++ #define IVHD_SPECIAL_IOAPIC 1
+++++ #define IVHD_SPECIAL_HPET 2
#define IVHD_FLAG_HT_TUN_EN_MASK 0x01
#define IVHD_FLAG_PASSPW_EN_MASK 0x02
} __attribute__((packed));
bool amd_iommu_dump;
+++++ bool amd_iommu_irq_remap __read_mostly;
static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;
*/
struct amd_iommu **amd_iommu_rlookup_table;
+++++ /*
+++++ * This table is used to find the irq remapping table for a given device id
+++++ * quickly.
+++++ */
+++++ struct irq_remap_table **irq_lookup_table;
+++++
/*
--- -- * AMD IOMMU allows up to 2^16 differend protection domains. This is a bitmap
+++ ++ * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
* to know which ones are already in use.
*/
unsigned long *amd_iommu_pd_alloc_bitmap;
/****************************************************************************
*
--- -- * The following functions belong the the code path which parses the ACPI table
+++ ++ * The following functions belong to the code path which parses the ACPI table
* the second time. In this ACPI parsing iteration we allocate IOMMU specific
* data structures, initialize the device/alias/rlookup table and also
* basically initialize the hardware.
set_iommu_for_device(iommu, devid);
}
+++++ static int add_special_device(u8 type, u8 id, u16 devid)
+++++ {
+++++ struct devid_map *entry;
+++++ struct list_head *list;
+++++
+++++ if (type != IVHD_SPECIAL_IOAPIC && type != IVHD_SPECIAL_HPET)
+++++ return -EINVAL;
+++++
+++++ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+++++ if (!entry)
+++++ return -ENOMEM;
+++++
+++++ entry->id = id;
+++++ entry->devid = devid;
+++++
+++++ if (type == IVHD_SPECIAL_IOAPIC)
+++++ list = &ioapic_map;
+++++ else
+++++ list = &hpet_map;
+++++
+++++ list_add_tail(&entry->list, list);
+++++
+++++ return 0;
+++++ }
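+++++
+++++ /*
+++++ * Example (hypothetical value): an IVHD special-device entry with
+++++ * e->ext == 0x01f0a821 decodes as handle 0x21 (the IOAPIC id), devid
+++++ * 0xf0a8 (bus 0xf0, slot 0x15, fn 0) and type 0x01, IVHD_SPECIAL_IOAPIC;
+++++ * see the IVHD_DEV_SPECIAL parsing in init_iommu_from_acpi() below.
+++++ */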
+++++
/*
--- -- * Reads the device exclusion range from ACPI and initialize IOMMU with
+++ ++ * Reads the device exclusion range from ACPI and initializes the IOMMU with
* it
*/
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
* Takes a pointer to an AMD IOMMU entry in the ACPI table and
* initializes the hardware and our data structures with it.
*/
----- static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
+++++ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
struct ivhd_header *h)
{
u8 *p = (u8 *)h;
flags, ext_flags);
}
break;
+++++ case IVHD_DEV_SPECIAL: {
+++++ u8 handle, type;
+++++ const char *var;
+++++ u16 devid;
+++++ int ret;
+++++
+++++ handle = e->ext & 0xff;
+++++ devid = (e->ext >> 8) & 0xffff;
+++++ type = (e->ext >> 24) & 0xff;
+++++
+++++ if (type == IVHD_SPECIAL_IOAPIC)
+++++ var = "IOAPIC";
+++++ else if (type == IVHD_SPECIAL_HPET)
+++++ var = "HPET";
+++++ else
+++++ var = "UNKNOWN";
+++++
+++++ DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
+++++ var, (int)handle,
+++++ PCI_BUS(devid),
+++++ PCI_SLOT(devid),
+++++ PCI_FUNC(devid));
+++++
+++++ set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
+++++ ret = add_special_device(type, handle, devid);
+++++ if (ret)
+++++ return ret;
+++++ break;
+++++ }
default:
break;
}
p += ivhd_entry_length(p);
}
+++++
+++++ return 0;
}
/* Initializes the device->iommu mapping for the driver */
*/
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
+++++ int ret;
+++++
spin_lock_init(&iommu->lock);
/* Add IOMMU to internal data structures */
iommu->int_enabled = false;
----- init_iommu_from_acpi(iommu, h);
+++++ ret = init_iommu_from_acpi(iommu, h);
+++++ if (ret)
+++++ return ret;
+++++
+++++ /*
+++++ * Make sure IOMMU is not considered to translate itself. The IVRS
+++++ * table tells us so, but this is a lie!
+++++ */
+++++ amd_iommu_rlookup_table[iommu->devid] = NULL;
+++++
init_iommu_devices(iommu);
return 0;
if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
pr_info("AMD-Vi: Extended features: ");
- for (i = 0; ARRAY_SIZE(feat_str); ++i) {
+ for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
if (iommu_feature(iommu, (1ULL << i)))
pr_cont(" %s", feat_str[i]);
}
--- -- }
pr_cont("\n");
+++ ++ }
}
+++++ if (irq_remapping_enabled)
+++++ pr_info("AMD-Vi: Interrupt remapping enabled\n");
}
static int __init amd_iommu_init_pci(void)
break;
}
- /* Make sure ACS will be enabled */
- pci_request_acs();
-
ret = amd_iommu_init_devices();
print_iommu_info();
/****************************************************************************
*
* The following functions initialize the MSI interrupts for all IOMMUs
--- -- * in the system. Its a bit challenging because there could be multiple
+++ ++ * in the system. It's a bit challenging because there could be multiple
* IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
* pci_dev.
*
*
* The next functions belong to the third pass of parsing the ACPI
* table. In this last pass the memory mapping requirements are
--- -- * gathered (like exclusion and unity mapping reanges).
+++ ++ * gathered (like exclusion and unity mapping ranges).
*
****************************************************************************/
* Init the device table to not allow DMA access for devices and
* suppress all page faults
*/
----- static void init_device_table(void)
+++++ static void init_device_table_dma(void)
{
u32 devid;
}
}
+++++ static void __init uninit_device_table_dma(void)
+++++ {
+++++ u32 devid;
+++++
+++++ for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
+++++ amd_iommu_dev_table[devid].data[0] = 0ULL;
+++++ amd_iommu_dev_table[devid].data[1] = 0ULL;
+++++ }
+++++ }
+++++
+++++ static void init_device_table(void)
+++++ {
+++++ u32 devid;
+++++
+++++ if (!amd_iommu_irq_remap)
+++++ return;
+++++
+++++ for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
+++++ set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
+++++ }
+++++
static void iommu_init_flags(struct amd_iommu *iommu)
{
iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
static void __init free_on_init_error(void)
{
----- amd_iommu_uninit_devices();
+++++ free_pages((unsigned long)irq_lookup_table,
+++++ get_order(rlookup_table_size));
----- free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
----- get_order(MAX_DOMAIN_ID/8));
+++++ if (amd_iommu_irq_cache) {
+++++ kmem_cache_destroy(amd_iommu_irq_cache);
+++++ amd_iommu_irq_cache = NULL;
+++++ }
free_pages((unsigned long)amd_iommu_rlookup_table,
get_order(rlookup_table_size));
free_iommu_all();
----- free_unity_maps();
-----
#ifdef CONFIG_GART_IOMMU
/*
* We failed to initialize the AMD IOMMU - try fallback to GART
#endif
}
+++++ static bool __init check_ioapic_information(void)
+++++ {
+++++ int idx;
+++++
+++++ for (idx = 0; idx < nr_ioapics; idx++) {
+++++ int id = mpc_ioapic_id(idx);
+++++
+++++ if (get_ioapic_devid(id) < 0) {
+++++ pr_err(FW_BUG "AMD-Vi: IO-APIC[%d] not in IVRS table\n", id);
+++++ pr_err("AMD-Vi: Disabling interrupt remapping due to BIOS Bug\n");
+++++ return false;
+++++ }
+++++ }
+++++
+++++ return true;
+++++ }
+++++
+++++ static void __init free_dma_resources(void)
+++++ {
+++++ amd_iommu_uninit_devices();
+++++
+++++ free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
+++++ get_order(MAX_DOMAIN_ID/8));
+++++
+++++ free_unity_maps();
+++++ }
+++++
/*
* This is the hardware init function for AMD IOMMU in the system.
* This function is called either from amd_iommu_init or from the interrupt
if (amd_iommu_pd_alloc_bitmap == NULL)
goto out;
----- /* init the device table */
----- init_device_table();
-----
/*
* let all alias entries point to themselves
*/
if (ret)
goto out;
+++++ if (amd_iommu_irq_remap)
+++++ amd_iommu_irq_remap = check_ioapic_information();
+++++
+++++ if (amd_iommu_irq_remap) {
+++++ /*
+++++ * Interrupt remapping enabled, create kmem_cache for the
+++++ * remapping tables.
+++++ */
+++++ amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
+++++ MAX_IRQS_PER_TABLE * sizeof(u32),
+++++ IRQ_TABLE_ALIGNMENT,
+++++ 0, NULL);
+++++ if (!amd_iommu_irq_cache)
+++++ goto out;
+++++
+++++ irq_lookup_table = (void *)__get_free_pages(
+++++ GFP_KERNEL | __GFP_ZERO,
+++++ get_order(rlookup_table_size));
+++++ if (!irq_lookup_table)
+++++ goto out;
+++++ }
+++++
ret = init_memory_definitions(ivrs_base);
if (ret)
goto out;
+++++ /* init the device table */
+++++ init_device_table();
+++++
out:
/* Don't leak any ACPI memory */
early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
+ /* Make sure ACS will be enabled during PCI probe */
+ pci_request_acs();
+
+++++ if (!disable_irq_remap)
+++++ amd_iommu_irq_remap = true;
+++++
return true;
}
static int amd_iommu_init_dma(void)
{
+++++ struct amd_iommu *iommu;
int ret;
+++++ init_device_table_dma();
+++++
+++++ for_each_iommu(iommu)
+++++ iommu_flush_all_caches(iommu);
+++++
if (iommu_pass_through)
ret = amd_iommu_init_passthrough();
else
return ret;
}
+++++ #ifdef CONFIG_IRQ_REMAP
+++++ int __init amd_iommu_prepare(void)
+++++ {
+++++ return iommu_go_to_state(IOMMU_ACPI_FINISHED);
+++++ }
+ +++
+++++ int __init amd_iommu_supported(void)
+++++ {
+++++ return amd_iommu_irq_remap ? 1 : 0;
+++++ }
+++++
+++++ int __init amd_iommu_enable(void)
+++++ {
+++++ int ret;
+++++
+++++ ret = iommu_go_to_state(IOMMU_ENABLED);
+++++ if (ret)
+++++ return ret;
+++++
+++++ irq_remapping_enabled = 1;
+++ +
+++++ return 0;
+++++ }
+++++
+++++ void amd_iommu_disable(void)
+++++ {
+++++ amd_iommu_suspend();
+++++ }
+++++
+++++ int amd_iommu_reenable(int mode)
+++++ {
+++++ amd_iommu_resume();
+++++
+++++ return 0;
+++++ }
+ +
+++++ int __init amd_iommu_enable_faulting(void)
+++++ {
+++++ /* We enable MSI later when PCI is initialized */
+++++ return 0;
+++++ }
+++++ #endif
/*
* This is the core init function for AMD IOMMU hardware in the system.
ret = iommu_go_to_state(IOMMU_INITIALIZED);
if (ret) {
----- disable_iommus();
----- free_on_init_error();
+++++ free_dma_resources();
+++++ if (!irq_remapping_enabled) {
+++++ disable_iommus();
+++++ free_on_init_error();
+++++ } else {
+++++ struct amd_iommu *iommu;
+++++
+++++ uninit_device_table_dma();
+++++ for_each_iommu(iommu)
+++++ iommu_flush_all_caches(iommu);
+++++ }
}
return ret;
#define CMD_INV_DEV_ENTRY 0x02
#define CMD_INV_IOMMU_PAGES 0x03
#define CMD_INV_IOTLB_PAGES 0x04
+++++ #define CMD_INV_IRT 0x05
#define CMD_COMPLETE_PPR 0x07
#define CMD_INV_ALL 0x08
#define DEV_ENTRY_EX 0x67
#define DEV_ENTRY_SYSMGT1 0x68
#define DEV_ENTRY_SYSMGT2 0x69
+++++ #define DEV_ENTRY_IRQ_TBL_EN 0x80
#define DEV_ENTRY_INIT_PASS 0xb8
#define DEV_ENTRY_EINT_PASS 0xb9
#define DEV_ENTRY_NMI_PASS 0xba
#define DEV_ENTRY_MODE_MASK 0x07
#define DEV_ENTRY_MODE_SHIFT 0x09
+++++ #define MAX_DEV_TABLE_ENTRIES 0xffff
+++++
/* constants to configure the command buffer */
#define CMD_BUFFER_SIZE 8192
#define CMD_BUFFER_UNINITIALIZED 1
#define PAGE_SIZE_ALIGN(address, pagesize) \
((address) & ~((pagesize) - 1))
/*
--- -- * Creates an IOMMU PTE for an address an a given pagesize
+++ ++ * Creates an IOMMU PTE for an address and a given pagesize
* The PTE has no permission bits set
* Pagesize is expected to be a power-of-two larger than 4096
*/
/* Only true if all IOMMUs support device IOTLBs */
extern bool amd_iommu_iotlb_sup;
+++++ #define MAX_IRQS_PER_TABLE 256
+++++ #define IRQ_TABLE_ALIGNMENT 128
+++++
+++++ struct irq_remap_table {
+++++ spinlock_t lock;
+++++ unsigned min_index;
+++++ u32 *table;
+++++ };
+++++
+++++ extern struct irq_remap_table **irq_lookup_table;
+++++
+++++ /* Interrupt remapping feature used? */
+++++ extern bool amd_iommu_irq_remap;
+++++
+++++ /* kmem_cache to get tables with 128 byte alignment */
+++++ extern struct kmem_cache *amd_iommu_irq_cache;
+++++
/*
* Make iterating over all IOMMUs easier
*/
struct list_head dev_data_list; /* For global dev_data_list */
struct iommu_dev_data *alias_data;/* The alias dev_data */
struct protection_domain *domain; /* Domain the device is bound to */
--- -- atomic_t bind; /* Domain attach reverent count */
+++ ++ atomic_t bind; /* Domain attach reference count */
u16 devid; /* PCI Device ID */
bool iommu_v2; /* Device can make use of IOMMUv2 */
bool passthrough; /* Default for device is pt_domain */
u32 stored_l2[0x83];
};
+++++ struct devid_map {
+++++ struct list_head list;
+++++ u8 id;
+++++ u16 devid;
+++++ };
+++++
+++++ /* Map HPET and IOAPIC ids to the devid used by the IOMMU */
+++++ extern struct list_head ioapic_map;
+++++ extern struct list_head hpet_map;
+++++
/*
* List with all IOMMUs in the system. This list is not locked because it is
* only written and read at driver initialization or suspend time
return (((u16)bus) << 8) | devfn;
}
+++++ static inline int get_ioapic_devid(int id)
+++++ {
+++++ struct devid_map *entry;
+++++
+++++ list_for_each_entry(entry, &ioapic_map, list) {
+++++ if (entry->id == id)
+++++ return entry->devid;
+++++ }
+++++
+++++ return -EINVAL;
+++++ }
+++++
+++++ static inline int get_hpet_devid(int id)
+++++ {
+++++ struct devid_map *entry;
+++++
+++++ list_for_each_entry(entry, &hpet_map, list) {
+++++ if (entry->id == id)
+++++ return entry->devid;
+++++ }
+++++
+++++ return -EINVAL;
+++++ }
+++++
#ifdef CONFIG_AMD_IOMMU_STATS
struct __iommu_counter {
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
+ ++++#include <linux/debugfs.h>
+ ++++#include <linux/seq_file.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <mach/iomap.h>
- ----#include <mach/smmu.h>
#include <mach/tegra-ahb.h>
+ ++++enum smmu_hwgrp {
+ ++++ HWGRP_AFI,
+ ++++ HWGRP_AVPC,
+ ++++ HWGRP_DC,
+ ++++ HWGRP_DCB,
+ ++++ HWGRP_EPP,
+ ++++ HWGRP_G2,
+ ++++ HWGRP_HC,
+ ++++ HWGRP_HDA,
+ ++++ HWGRP_ISP,
+ ++++ HWGRP_MPE,
+ ++++ HWGRP_NV,
+ ++++ HWGRP_NV2,
+ ++++ HWGRP_PPCS,
+ ++++ HWGRP_SATA,
+ ++++ HWGRP_VDE,
+ ++++ HWGRP_VI,
+ ++++
+ ++++ HWGRP_COUNT,
+ ++++
+ ++++ HWGRP_END = ~0,
+ ++++};
+ ++++
+ ++++#define HWG_AFI (1 << HWGRP_AFI)
+ ++++#define HWG_AVPC (1 << HWGRP_AVPC)
+ ++++#define HWG_DC (1 << HWGRP_DC)
+ ++++#define HWG_DCB (1 << HWGRP_DCB)
+ ++++#define HWG_EPP (1 << HWGRP_EPP)
+ ++++#define HWG_G2 (1 << HWGRP_G2)
+ ++++#define HWG_HC (1 << HWGRP_HC)
+ ++++#define HWG_HDA (1 << HWGRP_HDA)
+ ++++#define HWG_ISP (1 << HWGRP_ISP)
+ ++++#define HWG_MPE (1 << HWGRP_MPE)
+ ++++#define HWG_NV (1 << HWGRP_NV)
+ ++++#define HWG_NV2 (1 << HWGRP_NV2)
+ ++++#define HWG_PPCS (1 << HWGRP_PPCS)
+ ++++#define HWG_SATA (1 << HWGRP_SATA)
+ ++++#define HWG_VDE (1 << HWGRP_VDE)
+ ++++#define HWG_VI (1 << HWGRP_VI)
+ ++++
/* bitmap of the page sizes currently supported */
#define SMMU_IOMMU_PGSIZES (SZ_4K)
#define SMMU_CONFIG_DISABLE 0
#define SMMU_CONFIG_ENABLE 1
- ----#define SMMU_TLB_CONFIG 0x14
- ----#define SMMU_TLB_CONFIG_STATS__MASK (1 << 31)
- ----#define SMMU_TLB_CONFIG_STATS__ENABLE (1 << 31)
+ ++++/* REVISIT: To support multiple MCs */
+ ++++enum {
+ ++++ _MC = 0,
+ ++++};
+ ++++
+ ++++enum {
+ ++++ _TLB = 0,
+ ++++ _PTC,
+ ++++};
+ ++++
+ ++++#define SMMU_CACHE_CONFIG_BASE 0x14
+ ++++#define __SMMU_CACHE_CONFIG(mc, cache) (SMMU_CACHE_CONFIG_BASE + 4 * cache)
+ ++++#define SMMU_CACHE_CONFIG(cache) __SMMU_CACHE_CONFIG(_MC, cache)
+ ++++
+ ++++#define SMMU_CACHE_CONFIG_STATS_SHIFT 31
+ ++++#define SMMU_CACHE_CONFIG_STATS_ENABLE (1 << SMMU_CACHE_CONFIG_STATS_SHIFT)
+ ++++#define SMMU_CACHE_CONFIG_STATS_TEST_SHIFT 30
+ ++++#define SMMU_CACHE_CONFIG_STATS_TEST (1 << SMMU_CACHE_CONFIG_STATS_TEST_SHIFT)
+ ++++
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS__ENABLE (1 << 29)
#define SMMU_TLB_CONFIG_ACTIVE_LINES__VALUE 0x10
#define SMMU_TLB_CONFIG_RESET_VAL 0x20000010
- ----#define SMMU_PTC_CONFIG 0x18
- ----#define SMMU_PTC_CONFIG_STATS__MASK (1 << 31)
- ----#define SMMU_PTC_CONFIG_STATS__ENABLE (1 << 31)
#define SMMU_PTC_CONFIG_CACHE__ENABLE (1 << 29)
#define SMMU_PTC_CONFIG_INDEX_MAP__PATTERN 0x3f
#define SMMU_PTC_CONFIG_RESET_VAL 0x2000003f
#define SMMU_ASID_SECURITY 0x38
- ----#define SMMU_STATS_TLB_HIT_COUNT 0x1f0
- ----#define SMMU_STATS_TLB_MISS_COUNT 0x1f4
- ----#define SMMU_STATS_PTC_HIT_COUNT 0x1f8
- ----#define SMMU_STATS_PTC_MISS_COUNT 0x1fc
+ ++++#define SMMU_STATS_CACHE_COUNT_BASE 0x1f0
+ ++++
+ ++++#define SMMU_STATS_CACHE_COUNT(mc, cache, hitmiss) \
+ ++++ (SMMU_STATS_CACHE_COUNT_BASE + 8 * cache + 4 * hitmiss)
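+ ++++
+ ++++/*
+ ++++ * Sanity check: SMMU_STATS_CACHE_COUNT(_MC, _TLB, 0/1) gives 0x1f0/0x1f4
+ ++++ * and (_MC, _PTC, 0/1) gives 0x1f8/0x1fc, matching the literal
+ ++++ * SMMU_STATS_* offsets removed above.
+ ++++ */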
#define SMMU_TRANSLATION_ENABLE_0 0x228
#define SMMU_TRANSLATION_ENABLE_1 0x22c
spinlock_t client_lock; /* for client list */
};
+ ++++struct smmu_debugfs_info {
+ ++++ struct smmu_device *smmu;
+ ++++ int mc;
+ ++++ int cache;
+ ++++};
+ ++++
/*
* Per SMMU device - IOMMU device
*/
unsigned long translation_enable_2;
unsigned long asid_security;
+ ++++ struct dentry *debugfs_root;
+ ++++ struct smmu_debugfs_info *debugfs_info;
+ ++++
struct device_node *ahb;
int num_as;
smmu_write(smmu, smmu->translation_enable_1, SMMU_TRANSLATION_ENABLE_1);
smmu_write(smmu, smmu->translation_enable_2, SMMU_TRANSLATION_ENABLE_2);
smmu_write(smmu, smmu->asid_security, SMMU_ASID_SECURITY);
- ---- smmu_write(smmu, SMMU_TLB_CONFIG_RESET_VAL, SMMU_TLB_CONFIG);
- ---- smmu_write(smmu, SMMU_PTC_CONFIG_RESET_VAL, SMMU_PTC_CONFIG);
+ ++++ smmu_write(smmu, SMMU_TLB_CONFIG_RESET_VAL, SMMU_CACHE_CONFIG(_TLB));
+ ++++ smmu_write(smmu, SMMU_PTC_CONFIG_RESET_VAL, SMMU_CACHE_CONFIG(_PTC));
smmu_flush_regs(smmu, 1);
goto out;
}
}
- dev_err(smmu->dev, "Couldn't find %s\n", dev_name(c->dev));
+ dev_err(smmu->dev, "Couldn't find %s\n", dev_name(dev));
out:
spin_unlock(&as->client_lock);
}
static int smmu_iommu_domain_init(struct iommu_domain *domain)
{
- int i, err = -ENODEV;
+ int i, err = -EAGAIN;
unsigned long flags;
struct smmu_as *as;
struct smmu_device *smmu = smmu_handle;
/* Look for a free AS with lock held */
for (i = 0; i < smmu->num_as; i++) {
as = &smmu->as[i];
- if (!as->pdir_page) {
- err = alloc_pdir(as);
- if (!err)
- goto found;
- }
+
+ if (as->pdir_page)
+ continue;
+
+ err = alloc_pdir(as);
+ if (!err)
+ goto found;
+
if (err != -EAGAIN)
break;
}
.pgsize_bitmap = SMMU_IOMMU_PGSIZES,
};
+ ++++/* Should match the order of the enums above */
+ ++++static const char * const smmu_debugfs_mc[] = { "mc", };
+ ++++static const char * const smmu_debugfs_cache[] = { "tlb", "ptc", };
+ ++++
+ ++++static ssize_t smmu_debugfs_stats_write(struct file *file,
+ ++++ const char __user *buffer,
+ ++++ size_t count, loff_t *pos)
+ ++++{
+ ++++ struct smmu_debugfs_info *info;
+ ++++ struct smmu_device *smmu;
+ ++++ struct dentry *dent;
+ ++++ int i;
+ ++++ enum {
+ ++++ _OFF = 0,
+ ++++ _ON,
+ ++++ _RESET,
+ ++++ };
+ ++++ const char * const command[] = {
+ ++++ [_OFF] = "off",
+ ++++ [_ON] = "on",
+ ++++ [_RESET] = "reset",
+ ++++ };
+ ++++ char str[] = "reset";
+ ++++ u32 val;
+ ++++ size_t offs;
+ ++++
+ ++++ count = min_t(size_t, count, sizeof(str));
+ ++++ if (copy_from_user(str, buffer, count))
+ ++++ return -EINVAL;
+ ++++
+ ++++ for (i = 0; i < ARRAY_SIZE(command); i++)
+ ++++ if (strncmp(str, command[i],
+ ++++ strlen(command[i])) == 0)
+ ++++ break;
+ ++++
+ ++++ if (i == ARRAY_SIZE(command))
+ ++++ return -EINVAL;
+ ++++
+ ++++ dent = file->f_dentry;
+ ++++ info = dent->d_inode->i_private;
+ ++++ smmu = info->smmu;
+ ++++
+ ++++ offs = SMMU_CACHE_CONFIG(info->cache);
+ ++++ val = smmu_read(smmu, offs);
+ ++++ switch (i) {
+ ++++ case _OFF:
+ ++++ val &= ~SMMU_CACHE_CONFIG_STATS_ENABLE;
+ ++++ val &= ~SMMU_CACHE_CONFIG_STATS_TEST;
+ ++++ smmu_write(smmu, val, offs);
+ ++++ break;
+ ++++ case _ON:
+ ++++ val |= SMMU_CACHE_CONFIG_STATS_ENABLE;
+ ++++ val &= ~SMMU_CACHE_CONFIG_STATS_TEST;
+ ++++ smmu_write(smmu, val, offs);
+ ++++ break;
+ ++++ case _RESET:
+ ++++ val |= SMMU_CACHE_CONFIG_STATS_TEST;
+ ++++ smmu_write(smmu, val, offs);
+ ++++ val &= ~SMMU_CACHE_CONFIG_STATS_TEST;
+ ++++ smmu_write(smmu, val, offs);
+ ++++ break;
+ ++++ default:
+ ++++ BUG();
+ ++++ break;
+ ++++ }
+ ++++
+ ++++ dev_dbg(smmu->dev, "%s() %08x, %08x @%08x\n", __func__,
+ ++++ val, smmu_read(smmu, offs), offs);
+ ++++
+ ++++ return count;
+ ++++}
+ ++++
+ ++++static int smmu_debugfs_stats_show(struct seq_file *s, void *v)
+ ++++{
+ ++++ struct smmu_debugfs_info *info;
+ ++++ struct smmu_device *smmu;
+ ++++ struct dentry *dent;
+ ++++ int i;
+ ++++ const char * const stats[] = { "hit", "miss", };
+ ++++
+ ++++ dent = d_find_alias(s->private);
+ ++++ info = dent->d_inode->i_private;
+ ++++ smmu = info->smmu;
+ ++++
+ ++++ for (i = 0; i < ARRAY_SIZE(stats); i++) {
+ ++++ u32 val;
+ ++++ size_t offs;
+ ++++
+ ++++ offs = SMMU_STATS_CACHE_COUNT(info->mc, info->cache, i);
+ ++++ val = smmu_read(smmu, offs);
+ ++++ seq_printf(s, "%s:%08x ", stats[i], val);
+ ++++
+ ++++ dev_dbg(smmu->dev, "%s() %s %08x @%08x\n", __func__,
+ ++++ stats[i], val, offs);
+ ++++ }
+ ++++ seq_printf(s, "\n");
+ ++++
+ ++++ return 0;
+ ++++}
+ ++++
+ ++++static int smmu_debugfs_stats_open(struct inode *inode, struct file *file)
+ ++++{
+ ++++ return single_open(file, smmu_debugfs_stats_show, inode);
+ ++++}
+ ++++
+ ++++static const struct file_operations smmu_debugfs_stats_fops = {
+ ++++ .open = smmu_debugfs_stats_open,
+ ++++ .read = seq_read,
+ ++++ .llseek = seq_lseek,
+ ++++ .release = single_release,
+ ++++ .write = smmu_debugfs_stats_write,
+ ++++};
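+ ++++
+ ++++/*
+ ++++ * Usage sketch (path assumes the default debugfs mount and the directory
+ ++++ * layout created in smmu_debugfs_create() below):
+ ++++ *   echo on    > /sys/kernel/debug/<smmu>/mc/tlb    # enable counters
+ ++++ *   echo reset > /sys/kernel/debug/<smmu>/mc/tlb    # pulse the test bit
+ ++++ *   cat          /sys/kernel/debug/<smmu>/mc/tlb    # prints "hit:... miss:..."
+ ++++ */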
+ ++++
+ ++++static void smmu_debugfs_delete(struct smmu_device *smmu)
+ ++++{
+ ++++ debugfs_remove_recursive(smmu->debugfs_root);
+ ++++ kfree(smmu->debugfs_info);
+ ++++}
+ ++++
+ ++++static void smmu_debugfs_create(struct smmu_device *smmu)
+ ++++{
+ ++++ int i;
+ ++++ size_t bytes;
+ ++++ struct dentry *root;
+ ++++
+ ++++ bytes = ARRAY_SIZE(smmu_debugfs_mc) * ARRAY_SIZE(smmu_debugfs_cache) *
+ ++++ sizeof(*smmu->debugfs_info);
+ ++++ smmu->debugfs_info = kmalloc(bytes, GFP_KERNEL);
+ ++++ if (!smmu->debugfs_info)
+ ++++ return;
+ ++++
+ ++++ root = debugfs_create_dir(dev_name(smmu->dev), NULL);
+ ++++ if (!root)
+ ++++ goto err_out;
+ ++++ smmu->debugfs_root = root;
+ ++++
+ ++++ for (i = 0; i < ARRAY_SIZE(smmu_debugfs_mc); i++) {
+ ++++ int j;
+ ++++ struct dentry *mc;
+ ++++
+ ++++ mc = debugfs_create_dir(smmu_debugfs_mc[i], root);
+ ++++ if (!mc)
+ ++++ goto err_out;
+ ++++
+ ++++ for (j = 0; j < ARRAY_SIZE(smmu_debugfs_cache); j++) {
+ ++++ struct dentry *cache;
+ ++++ struct smmu_debugfs_info *info;
+ ++++
+ ++++ info = smmu->debugfs_info;
+ ++++ info += i * ARRAY_SIZE(smmu_debugfs_cache) + j;
+ ++++ info->smmu = smmu;
+ ++++ info->mc = i;
+ ++++ info->cache = j;
+ ++++
+ ++++ cache = debugfs_create_file(smmu_debugfs_cache[j],
+ ++++ S_IWUGO | S_IRUGO, mc,
+ ++++ (void *)info,
+ ++++ &smmu_debugfs_stats_fops);
+ ++++ if (!cache)
+ ++++ goto err_out;
+ ++++ }
+ ++++ }
+ ++++
+ ++++ return;
+ ++++
+ ++++err_out:
+ ++++ smmu_debugfs_delete(smmu);
+ ++++}
+ ++++
static int tegra_smmu_suspend(struct device *dev)
{
struct smmu_device *smmu = dev_get_drvdata(dev);
if (!smmu->avp_vector_page)
return -ENOMEM;
+ ++++ smmu_debugfs_create(smmu);
smmu_handle = smmu;
return 0;
}
struct smmu_device *smmu = platform_get_drvdata(pdev);
int i;
+ ++++ smmu_debugfs_delete(smmu);
+ ++++
smmu_write(smmu, SMMU_CONFIG_DISABLE, SMMU_CONFIG);
for (i = 0; i < smmu->num_as; i++)
free_pdir(&smmu->as[i]);