/*
 * CAAM control-plane driver backend
 * Controller-level driver, kernel property detection, initialization
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 */
13 static int caam_remove(struct platform_device *pdev)
15 struct device *ctrldev;
16 struct caam_drv_private *ctrlpriv;
17 struct caam_drv_private_jr *jrpriv;
18 struct caam_full __iomem *topregs;
22 ctrlpriv = dev_get_drvdata(ctrldev);
23 topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
26 for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
27 ret |= caam_jr_shutdown(ctrlpriv->jrdev[ring]);
28 jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
29 irq_dispose_mapping(jrpriv->irq);
32 /* Shut down debug views */
33 #ifdef CONFIG_DEBUG_FS
34 debugfs_remove_recursive(ctrlpriv->dfs_root);
37 /* Unmap controller region */
38 iounmap(&topregs->ctrl);
40 kfree(ctrlpriv->jrdev);
46 /* Probe routine for CAAM top (controller) level */
47 static int caam_probe(struct platform_device *pdev)
51 struct device_node *nprop, *np;
52 struct caam_ctrl __iomem *ctrl;
53 struct caam_full __iomem *topregs;
54 struct caam_drv_private *ctrlpriv;
55 #ifdef CONFIG_DEBUG_FS
56 struct caam_perfmon *perfmon;
59 ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL);
64 dev_set_drvdata(dev, ctrlpriv);
65 ctrlpriv->pdev = pdev;
66 nprop = pdev->dev.of_node;
68 /* Get configuration properties from device tree */
69 /* First, get register page */
70 ctrl = of_iomap(nprop, 0);
72 dev_err(dev, "caam: of_iomap() failed\n");
75 ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
77 /* topregs used to derive pointers to CAAM sub-blocks only */
78 topregs = (struct caam_full __iomem *)ctrl;
80 /* Get the IRQ of the controller (for security violations only) */
81 ctrlpriv->secvio_irq = of_irq_to_resource(nprop, 0, NULL);
84 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
85 * long pointers in master configuration register
87 setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE |
88 (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
90 if (sizeof(dma_addr_t) == sizeof(u64))
91 if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
92 dma_set_mask(dev, DMA_BIT_MASK(40));
94 dma_set_mask(dev, DMA_BIT_MASK(36));
96 dma_set_mask(dev, DMA_BIT_MASK(32));
99 * Detect and enable JobRs
100 * First, find out how many ring spec'ed, allocate references
101 * for all, then go probe each one.
104 for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring")
107 /* for backward compatible with device trees */
108 for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring")
112 ctrlpriv->jrdev = kzalloc(sizeof(struct device *) * rspec, GFP_KERNEL);
113 if (ctrlpriv->jrdev == NULL) {
114 iounmap(&topregs->ctrl);
119 ctrlpriv->total_jobrs = 0;
120 for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") {
121 caam_jr_probe(pdev, np, ring);
122 ctrlpriv->total_jobrs++;
126 for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring") {
127 caam_jr_probe(pdev, np, ring);
128 ctrlpriv->total_jobrs++;
133 /* Check to see if QI present. If so, enable */
134 ctrlpriv->qi_present = !!(rd_reg64(&topregs->ctrl.perfmon.comp_parms) &
136 if (ctrlpriv->qi_present) {
137 ctrlpriv->qi = (struct caam_queue_if __force *)&topregs->qi;
138 /* This is all that's required to physically enable QI */
139 wr_reg32(&topregs->qi.qi_control_lo, QICTL_DQEN);
142 /* If no QI and no rings specified, quit and go home */
143 if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
144 dev_err(dev, "no queues configured, terminating\n");
149 /* NOTE: RTIC detection ought to go here, around Si time */
151 /* Initialize queue allocator lock */
152 spin_lock_init(&ctrlpriv->jr_alloc_lock);
154 /* Report "alive" for developer to see */
155 dev_info(dev, "device ID = 0x%016llx\n",
156 rd_reg64(&topregs->ctrl.perfmon.caam_id));
157 dev_info(dev, "job rings = %d, qi = %d\n",
158 ctrlpriv->total_jobrs, ctrlpriv->qi_present);
160 #ifdef CONFIG_DEBUG_FS
162 * FIXME: needs better naming distinction, as some amalgamation of
163 * "caam" and nprop->full_name. The OF name isn't distinctive,
164 * but does separate instances
166 perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
168 ctrlpriv->dfs_root = debugfs_create_dir("caam", NULL);
169 ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
171 /* Controller-level - performance monitor counters */
172 ctrlpriv->ctl_rq_dequeued =
173 debugfs_create_u64("rq_dequeued",
174 S_IRUSR | S_IRGRP | S_IROTH,
175 ctrlpriv->ctl, &perfmon->req_dequeued);
176 ctrlpriv->ctl_ob_enc_req =
177 debugfs_create_u64("ob_rq_encrypted",
178 S_IRUSR | S_IRGRP | S_IROTH,
179 ctrlpriv->ctl, &perfmon->ob_enc_req);
180 ctrlpriv->ctl_ib_dec_req =
181 debugfs_create_u64("ib_rq_decrypted",
182 S_IRUSR | S_IRGRP | S_IROTH,
183 ctrlpriv->ctl, &perfmon->ib_dec_req);
184 ctrlpriv->ctl_ob_enc_bytes =
185 debugfs_create_u64("ob_bytes_encrypted",
186 S_IRUSR | S_IRGRP | S_IROTH,
187 ctrlpriv->ctl, &perfmon->ob_enc_bytes);
188 ctrlpriv->ctl_ob_prot_bytes =
189 debugfs_create_u64("ob_bytes_protected",
190 S_IRUSR | S_IRGRP | S_IROTH,
191 ctrlpriv->ctl, &perfmon->ob_prot_bytes);
192 ctrlpriv->ctl_ib_dec_bytes =
193 debugfs_create_u64("ib_bytes_decrypted",
194 S_IRUSR | S_IRGRP | S_IROTH,
195 ctrlpriv->ctl, &perfmon->ib_dec_bytes);
196 ctrlpriv->ctl_ib_valid_bytes =
197 debugfs_create_u64("ib_bytes_validated",
198 S_IRUSR | S_IRGRP | S_IROTH,
199 ctrlpriv->ctl, &perfmon->ib_valid_bytes);
201 /* Controller level - global status values */
202 ctrlpriv->ctl_faultaddr =
203 debugfs_create_u64("fault_addr",
204 S_IRUSR | S_IRGRP | S_IROTH,
205 ctrlpriv->ctl, &perfmon->faultaddr);
206 ctrlpriv->ctl_faultdetail =
207 debugfs_create_u32("fault_detail",
208 S_IRUSR | S_IRGRP | S_IROTH,
209 ctrlpriv->ctl, &perfmon->faultdetail);
210 ctrlpriv->ctl_faultstatus =
211 debugfs_create_u32("fault_status",
212 S_IRUSR | S_IRGRP | S_IROTH,
213 ctrlpriv->ctl, &perfmon->status);
215 /* Internal covering keys (useful in non-secure mode only) */
216 ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
217 ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
218 ctrlpriv->ctl_kek = debugfs_create_blob("kek",
222 &ctrlpriv->ctl_kek_wrap);
224 ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
225 ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
226 ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
230 &ctrlpriv->ctl_tkek_wrap);
232 ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
233 ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
234 ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
238 &ctrlpriv->ctl_tdsk_wrap);
243 static struct of_device_id caam_match[] = {
245 .compatible = "fsl,sec-v4.0",
248 .compatible = "fsl,sec4.0",
252 MODULE_DEVICE_TABLE(of, caam_match);
254 static struct platform_driver caam_driver = {
257 .owner = THIS_MODULE,
258 .of_match_table = caam_match,
261 .remove = __devexit_p(caam_remove),
264 module_platform_driver(caam_driver);
266 MODULE_LICENSE("GPL");
267 MODULE_DESCRIPTION("FSL CAAM request backend");
268 MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");