/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
19 #define pr_fmt(fmt) "%s: " fmt, __func__
21 #include <linux/device.h>
22 #include <linux/kernel.h>
23 #include <linux/bug.h>
24 #include <linux/types.h>
25 #include <linux/module.h>
26 #include <linux/slab.h>
27 #include <linux/errno.h>
28 #include <linux/iommu.h>
/*
 * Per-bus-type IOMMU setup hook, called once when a driver registers its
 * ops via bus_set_iommu().  Nothing bus-generic to do yet; this is the
 * anchor point for future per-bus initialization (e.g. device notifiers).
 */
static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
{
}
35 * bus_set_iommu - set iommu-callbacks for the bus
37 * @ops: the callbacks provided by the iommu-driver
39 * This function is called by an iommu driver to set the iommu methods
40 * used for a particular bus. Drivers for devices on that bus can use
41 * the iommu-api after these ops are registered.
42 * This special function is needed because IOMMUs are usually devices on
43 * the bus itself, so the iommu drivers are not initialized when the bus
44 * is set up. With this function the iommu-driver can set the iommu-ops
47 int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops)
49 if (bus->iommu_ops != NULL)
54 /* Do IOMMU specific setup for this bus-type */
55 iommu_bus_init(bus, ops);
59 EXPORT_SYMBOL_GPL(bus_set_iommu);
61 bool iommu_present(struct bus_type *bus)
63 return bus->iommu_ops != NULL;
65 EXPORT_SYMBOL_GPL(iommu_present);
68 * iommu_set_fault_handler() - set a fault handler for an iommu domain
69 * @domain: iommu domain
70 * @handler: fault handler
72 * This function should be used by IOMMU users which want to be notified
73 * whenever an IOMMU fault happens.
75 * The fault handler itself should return 0 on success, and an appropriate
76 * error code otherwise.
78 void iommu_set_fault_handler(struct iommu_domain *domain,
79 iommu_fault_handler_t handler)
83 domain->handler = handler;
85 EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
87 struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
89 struct iommu_domain *domain;
92 if (bus == NULL || bus->iommu_ops == NULL)
95 domain = kzalloc(sizeof(*domain), GFP_KERNEL);
99 domain->ops = bus->iommu_ops;
101 ret = domain->ops->domain_init(domain);
112 EXPORT_SYMBOL_GPL(iommu_domain_alloc);
114 void iommu_domain_free(struct iommu_domain *domain)
116 if (likely(domain->ops->domain_destroy != NULL))
117 domain->ops->domain_destroy(domain);
121 EXPORT_SYMBOL_GPL(iommu_domain_free);
123 int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
125 if (unlikely(domain->ops->attach_dev == NULL))
128 return domain->ops->attach_dev(domain, dev);
130 EXPORT_SYMBOL_GPL(iommu_attach_device);
132 void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
134 if (unlikely(domain->ops->detach_dev == NULL))
137 domain->ops->detach_dev(domain, dev);
139 EXPORT_SYMBOL_GPL(iommu_detach_device);
141 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
144 if (unlikely(domain->ops->iova_to_phys == NULL))
147 return domain->ops->iova_to_phys(domain, iova);
149 EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
151 int iommu_domain_has_cap(struct iommu_domain *domain,
154 if (unlikely(domain->ops->domain_has_cap == NULL))
157 return domain->ops->domain_has_cap(domain, cap);
159 EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
161 int iommu_map(struct iommu_domain *domain, unsigned long iova,
162 phys_addr_t paddr, size_t size, int prot)
164 unsigned long orig_iova = iova;
165 unsigned int min_pagesz;
166 size_t orig_size = size;
169 if (unlikely(domain->ops->map == NULL))
172 /* find out the minimum page size supported */
173 min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
176 * both the virtual address and the physical one, as well as
177 * the size of the mapping, must be aligned (at least) to the
178 * size of the smallest page supported by the hardware
180 if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
181 pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
182 "0x%x\n", iova, (unsigned long)paddr,
183 (unsigned long)size, min_pagesz);
187 pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
188 (unsigned long)paddr, (unsigned long)size);
191 unsigned long pgsize, addr_merge = iova | paddr;
192 unsigned int pgsize_idx;
194 /* Max page size that still fits into 'size' */
195 pgsize_idx = __fls(size);
197 /* need to consider alignment requirements ? */
198 if (likely(addr_merge)) {
199 /* Max page size allowed by both iova and paddr */
200 unsigned int align_pgsize_idx = __ffs(addr_merge);
202 pgsize_idx = min(pgsize_idx, align_pgsize_idx);
205 /* build a mask of acceptable page sizes */
206 pgsize = (1UL << (pgsize_idx + 1)) - 1;
208 /* throw away page sizes not supported by the hardware */
209 pgsize &= domain->ops->pgsize_bitmap;
211 /* make sure we're still sane */
214 /* pick the biggest page */
215 pgsize_idx = __fls(pgsize);
216 pgsize = 1UL << pgsize_idx;
218 pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
219 (unsigned long)paddr, pgsize);
221 ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
230 /* unroll mapping in case something went wrong */
232 iommu_unmap(domain, orig_iova, orig_size - size);
236 EXPORT_SYMBOL_GPL(iommu_map);
238 size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
240 size_t unmapped_page, unmapped = 0;
241 unsigned int min_pagesz;
243 if (unlikely(domain->ops->unmap == NULL))
246 /* find out the minimum page size supported */
247 min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
250 * The virtual address, as well as the size of the mapping, must be
251 * aligned (at least) to the size of the smallest page supported
254 if (!IS_ALIGNED(iova | size, min_pagesz)) {
255 pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
256 iova, (unsigned long)size, min_pagesz);
260 pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
261 (unsigned long)size);
264 * Keep iterating until we either unmap 'size' bytes (or more)
265 * or we hit an area that isn't mapped.
267 while (unmapped < size) {
268 size_t left = size - unmapped;
270 unmapped_page = domain->ops->unmap(domain, iova, left);
274 pr_debug("unmapped: iova 0x%lx size %lx\n", iova,
275 (unsigned long)unmapped_page);
277 iova += unmapped_page;
278 unmapped += unmapped_page;
283 EXPORT_SYMBOL_GPL(iommu_unmap);