/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- 4k and 64k pages, with contiguous pte hints.
 *	- Up to 39-bit addressing
 *	- Context fault reporting
 */
#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include <asm/pgalloc.h>
/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		8

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* Number of VMIDs per SMMU */
#define ARM_SMMU_NUM_VMIDS		256

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (smmu)->pagesize)
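/*
 * For example, on an implementation with 4K register pages
 * (smmu->pagesize == SZ_4K), GR0 decodes at offset 0x0 of the mapped
 * region and GR1 at the following page, offset 0x1000.
 */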
/* Page table bits */
#define ARM_SMMU_PTE_PAGE		(((pteval_t)3) << 0)
#define ARM_SMMU_PTE_CONT		(((pteval_t)1) << 52)
#define ARM_SMMU_PTE_AF			(((pteval_t)1) << 10)
#define ARM_SMMU_PTE_SH_NS		(((pteval_t)0) << 8)
#define ARM_SMMU_PTE_SH_OS		(((pteval_t)2) << 8)
#define ARM_SMMU_PTE_SH_IS		(((pteval_t)3) << 8)

#if PAGE_SIZE == SZ_4K
#define ARM_SMMU_PTE_CONT_ENTRIES	16
#elif PAGE_SIZE == SZ_64K
#define ARM_SMMU_PTE_CONT_ENTRIES	32
#else
#define ARM_SMMU_PTE_CONT_ENTRIES	1
#endif

#define ARM_SMMU_PTE_CONT_SIZE		(PAGE_SIZE * ARM_SMMU_PTE_CONT_ENTRIES)
#define ARM_SMMU_PTE_CONT_MASK		(~(ARM_SMMU_PTE_CONT_SIZE - 1))
#define ARM_SMMU_PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(pte_t))
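/*
 * e.g. with 4K pages, ARM_SMMU_PTE_CONT_ENTRIES is 16, so a contiguous
 * run covers 64K and must start on a 64K boundary for the hint to apply.
 */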
/* Stage 1 PTE bits */
#define ARM_SMMU_PTE_AP_UNPRIV		(((pteval_t)1) << 6)
#define ARM_SMMU_PTE_AP_RDONLY		(((pteval_t)2) << 6)
#define ARM_SMMU_PTE_ATTRINDX_SHIFT	2
#define ARM_SMMU_PTE_nG			(((pteval_t)1) << 11)

/* Stage 2 PTE bits */
#define ARM_SMMU_PTE_HAP_FAULT		(((pteval_t)0) << 6)
#define ARM_SMMU_PTE_HAP_READ		(((pteval_t)1) << 6)
#define ARM_SMMU_PTE_HAP_WRITE		(((pteval_t)2) << 6)
#define ARM_SMMU_PTE_MEMATTR_OIWB	(((pteval_t)0xf) << 2)
#define ARM_SMMU_PTE_MEMATTR_NC		(((pteval_t)0x5) << 2)
#define ARM_SMMU_PTE_MEMATTR_DEV	(((pteval_t)0x1) << 2)
/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58
#define ARM_SMMU_GR0_PIDR0		0xfe0
#define ARM_SMMU_GR0_PIDR1		0xfe4
#define ARM_SMMU_GR0_PIDR2		0xfe8

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_PTFS_SHIFT			24
#define ID0_PTFS_MASK			0x2
#define ID0_PTFS_V8_ONLY		0x2
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)

#define PIDR2_ARCH_SHIFT		4
#define PIDR2_ARCH_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_STLBIALL		0x60
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)
/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (smmu)->pagesize)
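/*
 * e.g. context bank 3 of an SMMU with 4K register pages sits at
 * base + (size / 2) + (3 * SZ_4K): the translation context banks occupy
 * the upper half of the register region, one page per bank.
 */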
#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0_LO		0x20
#define ARM_SMMU_CB_TTBR0_HI		0x24
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR_LO		0x60
#define ARM_SMMU_CB_FAR_HI		0x64
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIASID		0x610

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)
#define TTBCR_EAE			(1 << 31)

#define TTBCR_PASIZE_SHIFT		16
#define TTBCR_PASIZE_MASK		0x7

#define TTBCR_TG0_4K			(0 << 14)
#define TTBCR_TG0_64K			(1 << 14)

#define TTBCR_SH0_SHIFT			12
#define TTBCR_SH0_MASK			0x3
#define TTBCR_SH_NS			0
#define TTBCR_SH_OS			2
#define TTBCR_SH_IS			3

#define TTBCR_ORGN0_SHIFT		10
#define TTBCR_IRGN0_SHIFT		8
#define TTBCR_RGN_MASK			0x3
#define TTBCR_RGN_NC			0
#define TTBCR_RGN_WBWA			1
#define TTBCR_RGN_WT			2
#define TTBCR_RGN_WB			3

#define TTBCR_SL0_SHIFT			6
#define TTBCR_SL0_MASK			0x3
#define TTBCR_SL0_LVL_2			0
#define TTBCR_SL0_LVL_1			1

#define TTBCR_T1SZ_SHIFT		16
#define TTBCR_T0SZ_SHIFT		0
#define TTBCR_SZ_MASK			0xf

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_MASK			0x7

#define TTBCR2_PASIZE_SHIFT		0
#define TTBCR2_PASIZE_MASK		0x7

/* Common definitions for PASize and SEP fields */
#define TTBCR2_ADDR_32			0
#define TTBCR2_ADDR_36			1
#define TTBCR2_ADDR_40			2
#define TTBCR2_ADDR_42			3
#define TTBCR2_ADDR_44			4
#define TTBCR2_ADDR_48			5

#define TTBRn_HI_ASID_SHIFT		16
#define MAIR_ATTR_SHIFT(n)		((n) << 3)
#define MAIR_ATTR_MASK			0xff
#define MAIR_ATTR_DEVICE		0x04
#define MAIR_ATTR_NC			0x44
#define MAIR_ATTR_WBRWA			0xff
#define MAIR_ATTR_IDX_NC		0
#define MAIR_ATTR_IDX_CACHE		1
#define MAIR_ATTR_IDX_DEV		2

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)
#define FSR_IGN				(FSR_AFF | FSR_ASF | FSR_TLBMCF |	\
					 FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT |		\
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)
struct arm_smmu_smr {
	u8				idx;
	u16				mask;
	u16				id;
};

struct arm_smmu_master {
	struct device_node		*of_node;

	/*
	 * The following is specific to the master's position in the
	 * SMMU chain.
	 */
	struct rb_node			node;
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];

	/*
	 * We only need to allocate these on the root SMMU, as we
	 * configure unmatched streams to bypass translation.
	 */
	struct arm_smmu_smr		*smrs;
};

struct arm_smmu_device {
	struct device			*dev;
	struct device_node		*parent_of_node;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pagesize;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
	u32				features;
	int				version;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long			input_size;
	unsigned long			s1_output_size;
	unsigned long			s2_output_size;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	DECLARE_BITMAP(vmid_map, ARM_SMMU_NUM_VMIDS);

	struct list_head		list;
	struct rb_root			masters;
};

struct arm_smmu_cfg {
	struct arm_smmu_device		*smmu;
	u8				vmid;
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	pgd_t				*pgd;
};

struct arm_smmu_domain {
	/*
	 * A domain can span across multiple, chained SMMUs and requires
	 * all devices within the domain to follow the same translation
	 * path.
	 */
	struct arm_smmu_device		*leaf_smmu;
	struct arm_smmu_cfg		root_cfg;
	phys_addr_t			output_mask;

	spinlock_t			lock;
};
static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;
		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}
static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this;
		this = container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}
static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct of_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node		= masterspec->np;
	master->num_streamids	= masterspec->args_count;

	for (i = 0; i < master->num_streamids; ++i)
		master->streamids[i] = masterspec->args[i];

	return insert_smmu_master(smmu, master);
}
static struct arm_smmu_device *find_parent_smmu(struct arm_smmu_device *smmu)
{
	struct arm_smmu_device *parent;

	if (!smmu->parent_of_node)
		return NULL;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(parent, &arm_smmu_devices, list)
		if (parent->dev->of_node == smmu->parent_of_node)
			goto out_unlock;

	parent = NULL;
	dev_warn(smmu->dev,
		 "Failed to find SMMU parent despite parent in DT\n");
out_unlock:
	spin_unlock(&arm_smmu_devices_lock);
	return parent;
}
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}
/* Wait for any pending TLB invalidations to complete */
static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}
static void arm_smmu_tlb_inv_context(struct arm_smmu_cfg *cfg)
{
	struct arm_smmu_device *smmu = cfg->smmu;
	void __iomem *base = ARM_SMMU_GR0(smmu);
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(cfg->vmid, base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(cfg->vmid, base + ARM_SMMU_GR0_TLBIVMID);
	}

	arm_smmu_tlb_sync(smmu);
}
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret;
	u32 fsr, far, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
	struct arm_smmu_device *smmu = root_cfg->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	if (fsr & FSR_IGN)
		dev_err_ratelimited(smmu->dev,
				    "Unexpected context fault (fsr 0x%x)\n",
				    fsr);

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO);
	iova = far;
#ifdef CONFIG_64BIT
	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI);
	iova |= ((unsigned long)far << 32);
#endif

	if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
		ret = IRQ_HANDLED;
		resume = RESUME_RETRY;
	} else {
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
	}

	/* Clear the faulting FSR */
	writel(fsr, cb_base + ARM_SMMU_CB_FSR);

	/* Retry or terminate any stalled transactions */
	if (fsr & FSR_SS)
		writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);

	return ret;
}
static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	if (!gfsr)
		return IRQ_NONE;

	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
{
	u32 reg;
	bool stage1;
	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
	struct arm_smmu_device *smmu = root_cfg->smmu;
	void __iomem *cb_base, *gr0_base, *gr1_base;

	gr0_base = ARM_SMMU_GR0(smmu);
	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = root_cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx);

	/* CBAR */
	reg = root_cfg->cbar;
	if (smmu->version == 1)
		reg |= root_cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/* Use the weakest memory type, so it is overridden by the pte */
	if (stage1)
		reg |= (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	reg |= root_cfg->vmid << CBAR_VMID_SHIFT;
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(root_cfg->cbndx));

	if (smmu->version > 1) {
		/* CBA2R */
#ifdef CONFIG_64BIT
		reg = CBA2R_RW64_64BIT;
#else
		reg = CBA2R_RW64_32BIT;
#endif
		writel_relaxed(reg,
			       gr1_base + ARM_SMMU_GR1_CBA2R(root_cfg->cbndx));

		/* TTBCR2 */
		switch (smmu->input_size) {
		case 32:
			reg = (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT);
			break;
		case 36:
			reg = (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT);
			break;
		case 39:
			reg = (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT);
			break;
		case 42:
			reg = (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT);
			break;
		case 44:
			reg = (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT);
			break;
		case 48:
			reg = (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT);
			break;
		}

		switch (smmu->s1_output_size) {
		case 32:
			reg |= (TTBCR2_ADDR_32 << TTBCR2_PASIZE_SHIFT);
			break;
		case 36:
			reg |= (TTBCR2_ADDR_36 << TTBCR2_PASIZE_SHIFT);
			break;
		case 39:
			reg |= (TTBCR2_ADDR_40 << TTBCR2_PASIZE_SHIFT);
			break;
		case 42:
			reg |= (TTBCR2_ADDR_42 << TTBCR2_PASIZE_SHIFT);
			break;
		case 44:
			reg |= (TTBCR2_ADDR_44 << TTBCR2_PASIZE_SHIFT);
			break;
		case 48:
			reg |= (TTBCR2_ADDR_48 << TTBCR2_PASIZE_SHIFT);
			break;
		}

		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
	}

	/* TTBR0 */
	reg = __pa(root_cfg->pgd);
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
	reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32;
	if (stage1)
		reg |= root_cfg->vmid << TTBRn_HI_ASID_SHIFT;
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);

	/*
	 * TTBCR
	 * We use long descriptors, with inner-shareable WBWA tables in TTBR0.
	 */
	if (smmu->version > 1) {
		if (PAGE_SIZE == SZ_4K)
			reg = TTBCR_TG0_4K;
		else
			reg = TTBCR_TG0_64K;

		if (!stage1) {
			switch (smmu->s2_output_size) {
			case 32:
				reg |= (TTBCR2_ADDR_32 << TTBCR_PASIZE_SHIFT);
				break;
			case 36:
				reg |= (TTBCR2_ADDR_36 << TTBCR_PASIZE_SHIFT);
				break;
			case 40:
				reg |= (TTBCR2_ADDR_40 << TTBCR_PASIZE_SHIFT);
				break;
			case 42:
				reg |= (TTBCR2_ADDR_42 << TTBCR_PASIZE_SHIFT);
				break;
			case 44:
				reg |= (TTBCR2_ADDR_44 << TTBCR_PASIZE_SHIFT);
				break;
			case 48:
				reg |= (TTBCR2_ADDR_48 << TTBCR_PASIZE_SHIFT);
				break;
			}
		} else {
			reg |= (64 - smmu->s1_output_size) << TTBCR_T0SZ_SHIFT;
		}
	} else {
		reg = 0;
	}

	reg |= TTBCR_EAE |
	      (TTBCR_SH_IS << TTBCR_SH0_SHIFT) |
	      (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) |
	      (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT) |
	      (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT);
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);

	/* MAIR0 (stage-1 only) */
	if (stage1) {
		reg = (MAIR_ATTR_NC << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_NC)) |
		      (MAIR_ATTR_WBRWA << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_CACHE)) |
		      (MAIR_ATTR_DEVICE << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_DEV));
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
	}

	/* SCTLR */
	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel(reg, cb_base + ARM_SMMU_CB_SCTLR);
}
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct device *dev)
{
	int irq, ret, start;
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
	struct arm_smmu_device *smmu, *parent;

	/*
	 * Walk the SMMU chain to find the root device for this chain.
	 * We assume that no masters have translations which terminate
	 * early, and therefore check that the root SMMU does indeed have
	 * a StreamID for the master in question.
	 */
	parent = dev->archdata.iommu;
	smmu_domain->output_mask = -1;
	do {
		smmu = parent;
		smmu_domain->output_mask &= (1ULL << smmu->s2_output_size) - 1;
	} while ((parent = find_parent_smmu(smmu)));

	if (!find_smmu_master(smmu, dev->of_node)) {
		dev_err(dev, "unable to find root SMMU for device\n");
		return -ENODEV;
	}

	/* VMID zero is reserved for stage-1 mappings */
	ret = __arm_smmu_alloc_bitmap(smmu->vmid_map, 1, ARM_SMMU_NUM_VMIDS);
	if (IS_ERR_VALUE(ret))
		return ret;

	root_cfg->vmid = ret;
	if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
		root_cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
	} else if (smmu->features & ARM_SMMU_FEAT_TRANS_S2) {
		root_cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
	} else {
		root_cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (IS_ERR_VALUE(ret))
		goto out_free_vmid;

	root_cfg->cbndx = ret;

	if (smmu->version == 1) {
		root_cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		root_cfg->irptndx %= smmu->num_context_irqs;
	} else {
		root_cfg->irptndx = root_cfg->cbndx;
	}

	irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx];
	ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
			  "arm-smmu-context-fault", domain);
	if (IS_ERR_VALUE(ret)) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			root_cfg->irptndx, irq);
		root_cfg->irptndx = -1;
		goto out_free_context;
	}

	root_cfg->smmu = smmu;
	arm_smmu_init_context_bank(smmu_domain);
	return 0;

out_free_context:
	__arm_smmu_free_bitmap(smmu->context_map, root_cfg->cbndx);
out_free_vmid:
	__arm_smmu_free_bitmap(smmu->vmid_map, root_cfg->vmid);
	return ret;
}
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
	struct arm_smmu_device *smmu = root_cfg->smmu;
	void __iomem *cb_base;
	int irq;

	if (!smmu)
		return;

	/* Disable the context bank and nuke the TLB before freeing it. */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
	arm_smmu_tlb_inv_context(root_cfg);

	if (root_cfg->irptndx != -1) {
		irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx];
		free_irq(irq, domain);
	}

	__arm_smmu_free_bitmap(smmu->vmid_map, root_cfg->vmid);
	__arm_smmu_free_bitmap(smmu->context_map, root_cfg->cbndx);
}
static int arm_smmu_domain_init(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain;
	pgd_t *pgd;

	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return -ENOMEM;

	pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
	if (!pgd)
		goto out_free_domain;
	smmu_domain->root_cfg.pgd = pgd;

	spin_lock_init(&smmu_domain->lock);
	domain->priv = smmu_domain;
	return 0;

out_free_domain:
	kfree(smmu_domain);
	return -ENOMEM;
}
static void arm_smmu_free_ptes(pmd_t *pmd)
{
	pgtable_t table = pmd_pgtable(*pmd);
	pgtable_page_dtor(table);
	__free_page(table);
}
static void arm_smmu_free_pmds(pud_t *pud)
{
	int i;
	pmd_t *pmd, *pmd_base = pmd_offset(pud, 0);

	pmd = pmd_base;
	for (i = 0; i < PTRS_PER_PMD; ++i) {
		if (pmd_none(*pmd))
			continue;

		arm_smmu_free_ptes(pmd);
		pmd++;
	}

	pmd_free(NULL, pmd_base);
}
static void arm_smmu_free_puds(pgd_t *pgd)
{
	int i;
	pud_t *pud, *pud_base = pud_offset(pgd, 0);

	pud = pud_base;
	for (i = 0; i < PTRS_PER_PUD; ++i) {
		if (pud_none(*pud))
			continue;

		arm_smmu_free_pmds(pud);
		pud++;
	}

	pud_free(NULL, pud_base);
}
static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain)
{
	int i;
	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
	pgd_t *pgd, *pgd_base = root_cfg->pgd;

	/*
	 * Recursively free the page tables for this domain. We don't
	 * care about speculative TLB filling, because the TLB will be
	 * nuked next time this context bank is re-allocated and no devices
	 * currently map to these tables.
	 */
	pgd = pgd_base;
	for (i = 0; i < PTRS_PER_PGD; ++i) {
		if (pgd_none(*pgd))
			continue;

		arm_smmu_free_puds(pgd);
		pgd++;
	}

	kfree(pgd_base);
}
static void arm_smmu_domain_destroy(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = domain->priv;

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	arm_smmu_destroy_domain_context(domain);
	arm_smmu_free_pgtables(smmu_domain);
	kfree(smmu_domain);
}
static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
					  struct arm_smmu_master *master)
{
	int i;
	struct arm_smmu_smr *smrs;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
		return 0;

	if (master->smrs)
		return -EEXIST;

	smrs = kmalloc(sizeof(*smrs) * master->num_streamids, GFP_KERNEL);
	if (!smrs) {
		dev_err(smmu->dev, "failed to allocate %d SMRs for master %s\n",
			master->num_streamids, master->of_node->name);
		return -ENOMEM;
	}

	/* Allocate the SMRs on the root SMMU */
	for (i = 0; i < master->num_streamids; ++i) {
		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
						  smmu->num_mapping_groups);
		if (IS_ERR_VALUE(idx)) {
			dev_err(smmu->dev, "failed to allocate free SMR\n");
			goto err_free_smrs;
		}

		smrs[i] = (struct arm_smmu_smr) {
			.idx	= idx,
			.mask	= 0, /* We don't currently share SMRs */
			.id	= master->streamids[i],
		};
	}

	/* It worked! Now, poke the actual hardware */
	for (i = 0; i < master->num_streamids; ++i) {
		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
			  smrs[i].mask << SMR_MASK_SHIFT;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
	}

	master->smrs = smrs;
	return 0;

err_free_smrs:
	while (--i >= 0)
		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
	kfree(smrs);
	return -ENOSPC;
}
static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
				      struct arm_smmu_master *master)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	struct arm_smmu_smr *smrs = master->smrs;

	/* Invalidate the SMRs before freeing back to the allocator */
	for (i = 0; i < master->num_streamids; ++i) {
		u8 idx = smrs[i].idx;
		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
		__arm_smmu_free_bitmap(smmu->smr_map, idx);
	}

	master->smrs = NULL;
	kfree(smrs);
}
static void arm_smmu_bypass_stream_mapping(struct arm_smmu_device *smmu,
					   struct arm_smmu_master *master)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	for (i = 0; i < master->num_streamids; ++i) {
		u16 sid = master->streamids[i];
		writel_relaxed(S2CR_TYPE_BYPASS,
			       gr0_base + ARM_SMMU_GR0_S2CR(sid));
	}
}
static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct arm_smmu_master *master)
{
	int i, ret;
	struct arm_smmu_device *parent, *smmu = smmu_domain->root_cfg.smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	ret = arm_smmu_master_configure_smrs(smmu, master);
	if (ret)
		return ret;

	/* Bypass the leaves */
	smmu = smmu_domain->leaf_smmu;
	while ((parent = find_parent_smmu(smmu))) {
		/*
		 * We won't have a StreamID match for anything but the root
		 * smmu, so we only need to worry about StreamID indexing,
		 * where we must install bypass entries in the S2CRs.
		 */
		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
			arm_smmu_bypass_stream_mapping(smmu, master);

		smmu = parent;
	}

	/* Now we're at the root, time to point at our context bank */
	for (i = 0; i < master->num_streamids; ++i) {
		u32 idx, s2cr;

		idx = master->smrs ? master->smrs[i].idx : master->streamids[i];
		s2cr = S2CR_TYPE_TRANS |
		       (smmu_domain->root_cfg.cbndx << S2CR_CBNDX_SHIFT);
		writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	return 0;
}
static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
					  struct arm_smmu_master *master)
{
	struct arm_smmu_device *smmu = smmu_domain->root_cfg.smmu;

	/*
	 * We *must* clear the S2CR first, because freeing the SMR means
	 * that it can be re-allocated immediately.
	 */
	arm_smmu_bypass_stream_mapping(smmu, master);
	arm_smmu_master_free_smrs(smmu, master);
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = -EINVAL;
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_device *device_smmu = dev->archdata.iommu;
	struct arm_smmu_master *master;

	if (!device_smmu) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/*
	 * Sanity check the domain. We don't currently support domains
	 * that cross between different SMMU chains.
	 */
	spin_lock(&smmu_domain->lock);
	if (!smmu_domain->leaf_smmu) {
		/* Now that we have a master, we can finalise the domain */
		ret = arm_smmu_init_domain_context(domain, dev);
		if (IS_ERR_VALUE(ret))
			goto err_unlock;

		smmu_domain->leaf_smmu = device_smmu;
	} else if (smmu_domain->leaf_smmu != device_smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->leaf_smmu->dev),
			dev_name(device_smmu->dev));
		goto err_unlock;
	}
	spin_unlock(&smmu_domain->lock);

	/* Looks ok, so add the device to the domain */
	master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
	if (!master)
		return -ENODEV;

	return arm_smmu_domain_add_master(smmu_domain, master);

err_unlock:
	spin_unlock(&smmu_domain->lock);
	return ret;
}
static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
	if (master)
		arm_smmu_domain_remove_master(smmu_domain, master);
}
static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr,
				   size_t size)
{
	unsigned long offset = (unsigned long)addr & ~PAGE_MASK;

	/*
	 * If the SMMU can't walk tables in the CPU caches, treat them
	 * like non-coherent DMA since we need to flush the new entries
	 * all the way out to memory. There's no possibility of recursion
	 * here as the SMMU table walker will not be wired through another
	 * SMMU.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_COHERENT_WALK))
		dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
			     DMA_TO_DEVICE);
}
static bool arm_smmu_pte_is_contiguous_range(unsigned long addr,
					     unsigned long end)
{
	return !(addr & ~ARM_SMMU_PTE_CONT_MASK) &&
		(addr + ARM_SMMU_PTE_CONT_SIZE <= end);
}
static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
				   unsigned long addr, unsigned long end,
				   unsigned long pfn, int flags, int stage)
{
	pte_t *pte, *start;
	pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF;

	if (pmd_none(*pmd)) {
		/* Allocate a new set of tables */
		pgtable_t table = alloc_page(PGALLOC_GFP);
		if (!table)
			return -ENOMEM;

		arm_smmu_flush_pgtable(smmu, page_address(table),
				       ARM_SMMU_PTE_HWTABLE_SIZE);
		pgtable_page_ctor(table);
		pmd_populate(NULL, pmd, table);
		arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd));
	}

	if (stage == 1) {
		pteval |= ARM_SMMU_PTE_AP_UNPRIV | ARM_SMMU_PTE_nG;
		if (!(flags & IOMMU_WRITE) && (flags & IOMMU_READ))
			pteval |= ARM_SMMU_PTE_AP_RDONLY;

		if (flags & IOMMU_CACHE)
			pteval |= (MAIR_ATTR_IDX_CACHE <<
				   ARM_SMMU_PTE_ATTRINDX_SHIFT);
	} else {
		pteval |= ARM_SMMU_PTE_HAP_FAULT;
		if (flags & IOMMU_READ)
			pteval |= ARM_SMMU_PTE_HAP_READ;
		if (flags & IOMMU_WRITE)
			pteval |= ARM_SMMU_PTE_HAP_WRITE;
		if (flags & IOMMU_CACHE)
			pteval |= ARM_SMMU_PTE_MEMATTR_OIWB;
		else
			pteval |= ARM_SMMU_PTE_MEMATTR_NC;
	}

	/* If no access, create a faulting entry to avoid TLB fills */
	if (!(flags & (IOMMU_READ | IOMMU_WRITE)))
		pteval &= ~ARM_SMMU_PTE_PAGE;

	pteval |= ARM_SMMU_PTE_SH_IS;
	start = pmd_page_vaddr(*pmd) + pte_index(addr);
	pte = start;
	/*
	 * Install the page table entries. This is fairly complicated
	 * since we attempt to make use of the contiguous hint in the
	 * ptes where possible. The contiguous hint indicates a series
	 * of ARM_SMMU_PTE_CONT_ENTRIES ptes mapping a physically
	 * contiguous region with the following constraints:
	 *
	 *   - The region start is aligned to ARM_SMMU_PTE_CONT_SIZE
	 *   - Each pte in the region has the contiguous hint bit set
	 *
	 * This complicates unmapping (also handled by this code, when
	 * neither IOMMU_READ or IOMMU_WRITE are set) because it is
	 * possible, yet highly unlikely, that a client may unmap only
	 * part of a contiguous range. This requires clearing of the
	 * contiguous hint bits in the range before installing the new
	 * faulting entries.
	 *
	 * Note that re-mapping an address range without first unmapping
	 * it is not supported, so TLB invalidation is not required here
	 * and is instead performed at unmap and domain-init time.
	 */
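	/*
	 * For example, with 4K pages a 64K-aligned, 64K-sized mapping is
	 * installed as 16 ptes in a single inner iteration below, each
	 * carrying ARM_SMMU_PTE_CONT so the TLB can cache the whole run
	 * as one entry.
	 */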
	do {
		int i = 1;

		pteval &= ~ARM_SMMU_PTE_CONT;

		if (arm_smmu_pte_is_contiguous_range(addr, end)) {
			i = ARM_SMMU_PTE_CONT_ENTRIES;
			pteval |= ARM_SMMU_PTE_CONT;
		} else if (pte_val(*pte) &
			   (ARM_SMMU_PTE_CONT | ARM_SMMU_PTE_PAGE)) {
			int j;
			pte_t *cont_start;
			unsigned long idx = pte_index(addr);

			idx &= ~(ARM_SMMU_PTE_CONT_ENTRIES - 1);
			cont_start = pmd_page_vaddr(*pmd) + idx;
			for (j = 0; j < ARM_SMMU_PTE_CONT_ENTRIES; ++j)
				pte_val(*(cont_start + j)) &= ~ARM_SMMU_PTE_CONT;

			arm_smmu_flush_pgtable(smmu, cont_start,
					       sizeof(*pte) *
					       ARM_SMMU_PTE_CONT_ENTRIES);
		}

		do {
			*pte = pfn_pte(pfn, __pgprot(pteval));
		} while (pte++, pfn++, addr += PAGE_SIZE, --i);
	} while (addr != end);

	arm_smmu_flush_pgtable(smmu, start, sizeof(*pte) * (pte - start));
	return 0;
}
static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   phys_addr_t phys, int flags, int stage)
{
	int ret;
	pmd_t *pmd;
	unsigned long next, pfn = __phys_to_pfn(phys);

#ifndef __PAGETABLE_PMD_FOLDED
	if (pud_none(*pud)) {
		pmd = pmd_alloc_one(NULL, addr);
		if (!pmd)
			return -ENOMEM;
	} else
#endif
		pmd = pmd_offset(pud, addr);

	do {
		next = pmd_addr_end(addr, end);
		ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, next, pfn,
					      flags, stage);
		pud_populate(NULL, pud, pmd);
		arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud));
		phys += next - addr;
		pfn = __phys_to_pfn(phys);
	} while (pmd++, addr = next, addr < end);

	return ret;
}
static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   phys_addr_t phys, int flags, int stage)
{
	int ret = 0;
	pud_t *pud;
	unsigned long next;

#ifndef __PAGETABLE_PUD_FOLDED
	if (pgd_none(*pgd)) {
		pud = pud_alloc_one(NULL, addr);
		if (!pud)
			return -ENOMEM;
	} else
#endif
		pud = pud_offset(pgd, addr);

	do {
		next = pud_addr_end(addr, end);
		ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys,
					      flags, stage);
		pgd_populate(NULL, pgd, pud);
		arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd));
		phys += next - addr;
	} while (pud++, addr = next, addr < end);

	return ret;
}
static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
				   unsigned long iova, phys_addr_t paddr,
				   size_t size, int flags)
{
	int ret, stage;
	unsigned long end;
	phys_addr_t input_mask, output_mask;
	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
	pgd_t *pgd = root_cfg->pgd;
	struct arm_smmu_device *smmu = root_cfg->smmu;

	if (root_cfg->cbar == CBAR_TYPE_S2_TRANS) {
		stage = 2;
		output_mask = (1ULL << smmu->s2_output_size) - 1;
	} else {
		stage = 1;
		output_mask = (1ULL << smmu->s1_output_size) - 1;
	}

	if (!pgd)
		return -EINVAL;

	if (size & ~PAGE_MASK)
		return -EINVAL;

	input_mask = (1ULL << smmu->input_size) - 1;
	if ((phys_addr_t)iova & ~input_mask)
		return -ERANGE;

	if (paddr & ~output_mask)
		return -ERANGE;

	spin_lock(&smmu_domain->lock);
	pgd += pgd_index(iova);
	end = iova + size;
	do {
		unsigned long next = pgd_addr_end(iova, end);

		ret = arm_smmu_alloc_init_pud(smmu, pgd, iova, next, paddr,
					      flags, stage);
		if (ret)
			goto out_unlock;

		paddr += next - iova;
		iova = next;
	} while (pgd++, iova != end);

out_unlock:
	spin_unlock(&smmu_domain->lock);

	/* Ensure new page tables are visible to the hardware walker */
	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		dsb();

	return ret;
}
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int flags)
{
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_device *smmu = smmu_domain->leaf_smmu;

	if (!smmu_domain || !smmu)
		return -ENODEV;

	/* Check for silent address truncation up the SMMU chain. */
	if ((phys_addr_t)iova & ~smmu_domain->output_mask)
		return -ERANGE;

	return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, flags);
}
static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	int ret;
	struct arm_smmu_domain *smmu_domain = domain->priv;

	ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0);
	arm_smmu_tlb_inv_context(&smmu_domain->root_cfg);
	return ret ? 0 : size;
}
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
	struct arm_smmu_device *smmu = root_cfg->smmu;

	spin_lock(&smmu_domain->lock);
	pgd = root_cfg->pgd;
	if (!pgd)
		goto err_unlock;

	pgd += pgd_index(iova);
	if (pgd_none_or_clear_bad(pgd))
		goto err_unlock;

	pud = pud_offset(pgd, iova);
	if (pud_none_or_clear_bad(pud))
		goto err_unlock;

	pmd = pmd_offset(pud, iova);
	if (pmd_none_or_clear_bad(pmd))
		goto err_unlock;

	pte = pmd_page_vaddr(*pmd) + pte_index(iova);
	if (pte_none(*pte))
		goto err_unlock;

	spin_unlock(&smmu_domain->lock);
	return __pfn_to_phys(pte_pfn(*pte)) | (iova & ~PAGE_MASK);

err_unlock:
	spin_unlock(&smmu_domain->lock);
	dev_warn(smmu->dev,
		 "invalid (corrupt?) page tables detected for iova 0x%llx\n",
		 (unsigned long long)iova);
	return -EINVAL;
}
static int arm_smmu_domain_has_cap(struct iommu_domain *domain,
				   unsigned long cap)
{
	unsigned long caps = 0;
	struct arm_smmu_domain *smmu_domain = domain->priv;

	if (smmu_domain->root_cfg.smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		caps |= IOMMU_CAP_CACHE_COHERENCY;

	return !!(cap & caps);
}
static int arm_smmu_add_device(struct device *dev)
{
	struct arm_smmu_device *child, *parent, *smmu;
	struct arm_smmu_master *master = NULL;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(parent, &arm_smmu_devices, list) {
		smmu = parent;

		/* Try to find a child of the current SMMU. */
		list_for_each_entry(child, &arm_smmu_devices, list) {
			if (child->parent_of_node == parent->dev->of_node) {
				/* Does the child sit above our master? */
				master = find_smmu_master(child, dev->of_node);
				if (master) {
					smmu = NULL;
					break;
				}
			}
		}

		/* We found some children, so keep searching. */
		if (!smmu) {
			master = NULL;
			continue;
		}

		master = find_smmu_master(smmu, dev->of_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!master)
		return -ENODEV;

	dev->archdata.iommu = smmu;
	return 0;
}
static void arm_smmu_remove_device(struct device *dev)
{
	dev->archdata.iommu = NULL;
}
static struct iommu_ops arm_smmu_ops = {
	.domain_init	= arm_smmu_domain_init,
	.domain_destroy	= arm_smmu_domain_destroy,
	.attach_dev	= arm_smmu_attach_dev,
	.detach_dev	= arm_smmu_detach_dev,
	.map		= arm_smmu_map,
	.unmap		= arm_smmu_unmap,
	.iova_to_phys	= arm_smmu_iova_to_phys,
	.domain_has_cap	= arm_smmu_domain_has_cap,
	.add_device	= arm_smmu_add_device,
	.remove_device	= arm_smmu_remove_device,
	.pgsize_bitmap	= (SECTION_SIZE |
			   ARM_SMMU_PTE_CONT_SIZE |
			   PAGE_SIZE),
};
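/*
 * Illustrative sketch (not part of this driver): a consumer reaches the
 * ops above through the generic IOMMU API rather than calling them
 * directly, along the lines of:
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *
 *	if (domain && !iommu_attach_device(domain, dev))
 *		iommu_map(domain, iova, paddr, SZ_4K,
 *			  IOMMU_READ | IOMMU_WRITE);
 *
 * where dev, iova and paddr are supplied by the caller.
 */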
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *sctlr_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB_SCTLR;
	int i;
	u32 scr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sCR0);

	/* Mark all SMRn as invalid and all S2CRn as bypass */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(i));
		writel_relaxed(S2CR_TYPE_BYPASS, gr0_base + ARM_SMMU_GR0_S2CR(i));
	}

	/* Make sure all context banks are disabled */
	for (i = 0; i < smmu->num_context_banks; ++i)
		writel_relaxed(0, sctlr_base + ARM_SMMU_CB(smmu, i));

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	/* Enable fault reporting */
	scr0 |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	scr0 |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, but bypass when no mapping is found */
	scr0 &= ~(sCR0_CLIENTPD | sCR0_USFCFG);

	/* Disable forced broadcasting */
	scr0 &= ~sCR0_FB;

	/* Don't upgrade barriers */
	scr0 &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	/* Push the button */
	arm_smmu_tlb_sync(smmu);
	writel(scr0, gr0_base + ARM_SMMU_GR0_sCR0);
}
static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}
static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;

	dev_notice(smmu->dev, "probing hardware configuration...\n");

	/* Primecell ID */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_PIDR2);
	smmu->version = ((id >> PIDR2_ARCH_SHIFT) & PIDR2_ARCH_MASK) + 1;
	dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
#ifndef CONFIG_64BIT
	if (((id >> ID0_PTFS_SHIFT) & ID0_PTFS_MASK) == ID0_PTFS_V8_ONLY) {
		dev_err(smmu->dev, "\tno v7 descriptor support!\n");
		return -ENODEV;
	}
#endif
	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
		(ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2 |
		 ARM_SMMU_FEAT_TRANS_NESTED))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if (id & ID0_CTTW) {
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
		dev_notice(smmu->dev, "\tcoherent table walk\n");
	}

	if (id & ID0_SMS) {
		u32 smr, sid, mask;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
					   ID0_NUMSMRG_MASK;
		if (smmu->num_mapping_groups == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
		smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));

		mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
		sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
		if ((mask & sid) != sid) {
			dev_err(smmu->dev,
				"SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
				mask, sid);
			return -ENODEV;
		}

		dev_notice(smmu->dev,
			   "\tstream matching with %u register groups, mask 0x%x",
			   smmu->num_mapping_groups, mask);
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pagesize = (id & ID1_PAGESIZE) ? SZ_64K : SZ_4K;

	/* Check that we ioremapped enough */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= (smmu->pagesize << 1);
	if (smmu->size < size)
		dev_warn(smmu->dev,
			 "device is 0x%lx bytes but only mapped 0x%lx!\n",
			 size, smmu->size);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) &
				     ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);

	/*
	 * Stage-1 output limited by stage-2 input size due to pgd
	 * allocation (PTRS_PER_PGD).
	 */
#ifdef CONFIG_64BIT
	/* Current maximum output size of 39 bits */
	smmu->s1_output_size = min(39UL, size);
#else
	smmu->s1_output_size = min(32UL, size);
#endif

	/* The stage-2 output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->s2_output_size = min((unsigned long)PHYS_MASK_SHIFT, size);

	if (smmu->version == 1) {
		smmu->input_size = 32;
	} else {
#ifdef CONFIG_64BIT
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		size = min(39, arm_smmu_id_size_to_bits(size));
#else
		size = 32;
#endif
		smmu->input_size = size;

		if ((PAGE_SIZE == SZ_4K && !(id & ID2_PTFS_4K)) ||
		    (PAGE_SIZE == SZ_64K && !(id & ID2_PTFS_64K)) ||
		    (PAGE_SIZE != SZ_4K && PAGE_SIZE != SZ_64K)) {
			dev_err(smmu->dev, "CPU page size 0x%lx unsupported\n",
				PAGE_SIZE);
			return -ENODEV;
		}
	}

	dev_notice(smmu->dev,
		   "\t%lu-bit VA, %lu-bit IPA, %lu-bit PA\n",
		   smmu->input_size, smmu->s1_output_size, smmu->s2_output_size);
	return 0;
}
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device_node *dev_node;
	struct device *dev = &pdev->dev;
	struct rb_node *node;
	struct of_phandle_args masterspec;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "missing base address/size\n");
		return -ENODEV;
	}

	smmu->size = resource_size(res);
	smmu->base = devm_request_and_ioremap(dev, res);
	if (!smmu->base)
		return -EADDRNOTAVAIL;

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (num_irqs < smmu->num_global_irqs) {
		dev_warn(dev, "found %d interrupts but expected at least %d\n",
			 num_irqs, smmu->num_global_irqs);
		smmu->num_global_irqs = num_irqs;
	}
	smmu->num_context_irqs = num_irqs - smmu->num_global_irqs;

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	i = 0;
	smmu->masters = RB_ROOT;
	while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
					   "#stream-id-cells", i,
					   &masterspec)) {
		err = register_smmu_master(smmu, dev, &masterspec);
		if (err) {
			dev_err(dev, "failed to add master %s\n",
				masterspec.np->name);
			goto out_put_masters;
		}

		i++;
	}
	dev_notice(dev, "registered %d master devices\n", i);
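	/*
	 * Illustrative device-tree fragment for the binding parsed above
	 * (node names and stream IDs are examples, not mandated here):
	 *
	 *	smmu {
	 *		compatible = "arm,smmu-v1";
	 *		reg = <0xba5e0000 0x10000>;
	 *		#global-interrupts = <2>;
	 *		mmu-masters = <&dma0 0xd01d 0xd01e>;
	 *	};
	 *
	 * where the dma0 master node declares #stream-id-cells = <2>.
	 */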
	if ((dev_node = of_parse_phandle(dev->of_node, "smmu-parent", 0)))
		smmu->parent_of_node = dev_node;

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		goto out_put_parent;

	if (smmu->version > 1 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		err = -ENODEV;
		goto out_put_parent;
	}

	arm_smmu_device_reset(smmu);

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = request_irq(smmu->irqs[i],
				  arm_smmu_global_fault,
				  IRQF_SHARED,
				  "arm-smmu global fault",
				  smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			goto out_free_irqs;
		}
	}

	INIT_LIST_HEAD(&smmu->list);
	spin_lock(&arm_smmu_devices_lock);
	list_add(&smmu->list, &arm_smmu_devices);
	spin_unlock(&arm_smmu_devices_lock);
	return 0;

out_free_irqs:
	while (i--)
		free_irq(smmu->irqs[i], smmu);

out_put_parent:
	if (smmu->parent_of_node)
		of_node_put(smmu->parent_of_node);

out_put_masters:
	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master;
		master = container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	return err;
}
static int arm_smmu_device_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	struct arm_smmu_device *curr, *smmu = NULL;
	struct rb_node *node;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(curr, &arm_smmu_devices, list) {
		if (curr->dev == dev) {
			smmu = curr;
			list_del(&smmu->list);
			break;
		}
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!smmu)
		return -ENODEV;

	if (smmu->parent_of_node)
		of_node_put(smmu->parent_of_node);

	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master;
		master = container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	if (!bitmap_empty(smmu->vmid_map, ARM_SMMU_NUM_VMIDS))
		dev_err(dev, "removing device with active domains!\n");

	for (i = 0; i < smmu->num_global_irqs; ++i)
		free_irq(smmu->irqs[i], smmu);

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}
#ifdef CONFIG_OF
static struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", },
	{ .compatible = "arm,smmu-v2", },
	{ .compatible = "arm,mmu-400", },
	{ .compatible = "arm,mmu-500", },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
#endif
static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.owner		= THIS_MODULE,
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};
static int __init arm_smmu_init(void)
{
	int ret;

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);

	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);

	return 0;
}
static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

module_init(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");