/* drivers/crypto/talitos.c — snapshot at "crypto: talitos - Add ablkcipher algorithms" */
1 /*
2  * talitos - Freescale Integrated Security Engine (SEC) device driver
3  *
4  * Copyright (c) 2008 Freescale Semiconductor, Inc.
5  *
6  * Scatterlist Crypto API glue code copied from files with the following:
7  * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
8  *
9  * Crypto algorithm registration code copied from hifn driver:
10  * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11  * All rights reserved.
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with this program; if not, write to the Free Software
25  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
26  */
27
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/mod_devicetable.h>
31 #include <linux/device.h>
32 #include <linux/interrupt.h>
33 #include <linux/crypto.h>
34 #include <linux/hw_random.h>
35 #include <linux/of_platform.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/io.h>
38 #include <linux/spinlock.h>
39 #include <linux/rtnetlink.h>
40
41 #include <crypto/algapi.h>
42 #include <crypto/aes.h>
43 #include <crypto/des.h>
44 #include <crypto/sha.h>
45 #include <crypto/aead.h>
46 #include <crypto/authenc.h>
47 #include <crypto/skcipher.h>
48 #include <crypto/scatterwalk.h>
49
50 #include "talitos.h"
51
/* register-poll budget and largest data length one descriptor can carry */
#define TALITOS_TIMEOUT 100000
#define TALITOS_MAX_DATA_LEN 65535

/* extract the type / primary EU / secondary EU fields from a desc header */
#define DESC_TYPE(desc_hdr) ((be32_to_cpu(desc_hdr) >> 3) & 0x1f)
#define PRIMARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 28) & 0xf)
#define SECONDARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 16) & 0xf)
/*
 * descriptor pointer entry
 * layout is fixed by the SEC hardware; do not reorder or resize fields
 */
struct talitos_ptr {
	__be16 len;	/* length */
	u8 j_extent;	/* jump to sg link table and/or extent */
	u8 eptr;	/* extended address */
	__be32 ptr;	/* address */
};
66
/* descriptor: the hardware-defined request record (header + 7 ptr/len pairs) */
struct talitos_desc {
	__be32 hdr;			/* header high bits */
	__be32 hdr_lo;			/* header low bits */
	struct talitos_ptr ptr[7];	/* ptr/len pair array */
};
73
/**
 * talitos_request - descriptor submission request
 * @desc: descriptor pointer (kernel virtual)
 * @dma_desc: descriptor's physical bus address
 * @callback: whom to call when descriptor processing is done
 * @context: caller context (optional)
 *
 * A non-NULL @desc marks a fifo slot as occupied; talitos_submit() sets
 * it last (after a write barrier) and flush_channel() clears it when the
 * request is completed.
 */
struct talitos_request {
	struct talitos_desc *desc;
	dma_addr_t dma_desc;
	void (*callback) (struct device *dev, struct talitos_desc *desc,
			  void *context, int error);
	void *context;
};
88
/* per-device driver state, reachable via dev_get_drvdata(dev) */
struct talitos_private {
	struct device *dev;
	struct of_device *ofdev;
	void __iomem *reg;	/* mapped SEC register block */
	int irq;

	/* SEC version geometry (from device tree node) */
	unsigned int num_channels;
	unsigned int chfifo_len;
	unsigned int exec_units;
	unsigned int desc_types;

	/* SEC Compatibility info */
	unsigned long features;

	/* next channel to be assigned next incoming descriptor */
	atomic_t last_chan;

	/* per-channel number of requests pending in channel h/w fifo */
	atomic_t *submit_count;

	/* per-channel request fifo */
	struct talitos_request **fifo;

	/*
	 * length of the request fifo
	 * fifo_len is chfifo_len rounded up to next power of 2
	 * so we can use bitwise ops to wrap
	 */
	unsigned int fifo_len;

	/* per-channel index to next free descriptor request */
	int *head;

	/* per-channel index to next in-progress/done descriptor request */
	int *tail;

	/* per-channel request submission (head) and release (tail) locks */
	spinlock_t *head_lock;
	spinlock_t *tail_lock;

	/* request callback tasklet */
	struct tasklet_struct done_task;

	/* list of registered algorithms */
	struct list_head alg_list;

	/* hwrng device */
	struct hwrng rng;
};
139
140 /* .features flag */
141 #define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
142 #define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
143
144 /*
145  * map virtual single (contiguous) pointer to h/w descriptor pointer
146  */
147 static void map_single_talitos_ptr(struct device *dev,
148                                    struct talitos_ptr *talitos_ptr,
149                                    unsigned short len, void *data,
150                                    unsigned char extent,
151                                    enum dma_data_direction dir)
152 {
153         talitos_ptr->len = cpu_to_be16(len);
154         talitos_ptr->ptr = cpu_to_be32(dma_map_single(dev, data, len, dir));
155         talitos_ptr->j_extent = extent;
156 }
157
158 /*
159  * unmap bus single (contiguous) h/w descriptor pointer
160  */
161 static void unmap_single_talitos_ptr(struct device *dev,
162                                      struct talitos_ptr *talitos_ptr,
163                                      enum dma_data_direction dir)
164 {
165         dma_unmap_single(dev, be32_to_cpu(talitos_ptr->ptr),
166                          be16_to_cpu(talitos_ptr->len), dir);
167 }
168
/*
 * reset one channel and re-arm its done writeback and done interrupt.
 * Returns 0 on success, -EIO if the hardware never acknowledges the reset.
 */
static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg + TALITOS_CCCR(ch), TALITOS_CCCR_RESET);

	/* hardware clears RESET when the channel reset has completed */
	while ((in_be32(priv->reg + TALITOS_CCCR(ch)) & TALITOS_CCCR_RESET)
	       && --timeout)
		cpu_relax();

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set done writeback and IRQ */
	setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_CDWE |
		  TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg + TALITOS_CCCR_LO(ch),
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}
196
197 static int reset_device(struct device *dev)
198 {
199         struct talitos_private *priv = dev_get_drvdata(dev);
200         unsigned int timeout = TALITOS_TIMEOUT;
201
202         setbits32(priv->reg + TALITOS_MCR, TALITOS_MCR_SWR);
203
204         while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR)
205                && --timeout)
206                 cpu_relax();
207
208         if (timeout == 0) {
209                 dev_err(dev, "failed to reset device\n");
210                 return -EIO;
211         }
212
213         return 0;
214 }
215
/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	/* second reset, per the erratum above */
	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg + TALITOS_MDEUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}
256
/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:	the SEC device to be used
 * @desc:	the descriptor to be processed by the device
 * @callback:	whom to call when processing is complete
 * @context:	a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 *
 * Returns -EINPROGRESS when the descriptor has been handed to the
 * hardware, or -EAGAIN when the selected channel's fifo is full.
 */
static int talitos_submit(struct device *dev, struct talitos_desc *desc,
			  void (*callback)(struct device *dev,
					   struct talitos_desc *desc,
					   void *context, int error),
			  void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags, ch;
	int head;

	/* select done notification */
	desc->hdr |= DESC_HDR_DONE_NOTIFY;

	/* emulate SEC's round-robin channel fifo polling scheme;
	 * the mask relies on num_channels being a power of 2 */
	ch = atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1);

	spin_lock_irqsave(&priv->head_lock[ch], flags);

	if (!atomic_inc_not_zero(&priv->submit_count[ch])) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->head_lock[ch], flags);
		return -EAGAIN;
	}

	head = priv->head[ch];
	request = &priv->fifo[ch][head];

	/* map descriptor and save caller data */
	request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
					   DMA_BIDIRECTIONAL);
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->head[ch] = (priv->head[ch] + 1) & (priv->fifo_len - 1);

	/* publish desc last: a non-NULL desc is what flush_channel()
	 * uses to recognize an occupied fifo slot */
	smp_wmb();
	request->desc = desc;

	/* GO! — barrier orders the fifo writes before the MMIO kick */
	wmb();
	out_be32(priv->reg + TALITOS_FF_LO(ch), request->dma_desc);

	spin_unlock_irqrestore(&priv->head_lock[ch], flags);

	return -EINPROGRESS;
}
316
/*
 * process what was done, notify callback of error if not
 *
 * Walks channel @ch's s/w fifo from the tail, completing every request
 * whose descriptor has its done bit set, or failing the remainder with
 * @error.  Callbacks run with the tail lock dropped.  In the
 * single-descriptor error case (!@reset_ch) the walk stops after the
 * failed request so the channel can resume.
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;

	spin_lock_irqsave(&priv->tail_lock[ch], flags);

	tail = priv->tail[ch];
	while (priv->fifo[ch][tail].desc) {
		request = &priv->fifo[ch][tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
			sizeof(struct talitos_desc), DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->tail[ch] = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->tail_lock[ch], flags);

		atomic_dec(&priv->submit_count[ch]);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		/* re-take the lock and re-read tail: it may have advanced */
		spin_lock_irqsave(&priv->tail_lock[ch], flags);
		tail = priv->tail[ch];
	}

	spin_unlock_irqrestore(&priv->tail_lock[ch], flags);
}
373
374 /*
375  * process completed requests for channels that have done status
376  */
377 static void talitos_done(unsigned long data)
378 {
379         struct device *dev = (struct device *)data;
380         struct talitos_private *priv = dev_get_drvdata(dev);
381         int ch;
382
383         for (ch = 0; ch < priv->num_channels; ch++)
384                 flush_channel(dev, ch, 0, 0);
385
386         /* At this point, all completed channels have been processed.
387          * Unmask done interrupts for channels completed later on.
388          */
389         setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
390         setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);
391 }
392
393 /*
394  * locate current (offending) descriptor
395  */
396 static struct talitos_desc *current_desc(struct device *dev, int ch)
397 {
398         struct talitos_private *priv = dev_get_drvdata(dev);
399         int tail = priv->tail[ch];
400         dma_addr_t cur_desc;
401
402         cur_desc = in_be32(priv->reg + TALITOS_CDPR_LO(ch));
403
404         while (priv->fifo[ch][tail].dma_desc != cur_desc) {
405                 tail = (tail + 1) & (priv->fifo_len - 1);
406                 if (tail == priv->tail[ch]) {
407                         dev_err(dev, "couldn't locate current descriptor\n");
408                         return NULL;
409                 }
410         }
411
412         return priv->fifo[ch][tail].desc;
413 }
414
/*
 * user diagnostics; report root cause of error based on execution unit status
 *
 * Dumps the interrupt status register of the primary (SEL0) and secondary
 * (SEL1) execution units selected by the offending descriptor's header,
 * then the channel's descriptor buffer contents.
 */
static void report_eu_error(struct device *dev, int ch, struct talitos_desc *desc)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	/* primary execution unit */
	switch (desc->hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_AFEUISR),
			in_be32(priv->reg + TALITOS_AFEUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_DEUISR),
			in_be32(priv->reg + TALITOS_DEUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_MDEUISR),
			in_be32(priv->reg + TALITOS_MDEUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_RNGUISR),
			in_be32(priv->reg + TALITOS_RNGUISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_PKEUISR),
			in_be32(priv->reg + TALITOS_PKEUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_AESUISR),
			in_be32(priv->reg + TALITOS_AESUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_CRCUISR),
			in_be32(priv->reg + TALITOS_CRCUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_KEUISR),
			in_be32(priv->reg + TALITOS_KEUISR_LO));
		break;
	}

	/* secondary execution unit (only MDEU/CRCU are valid here) */
	switch (desc->hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_MDEUISR),
			in_be32(priv->reg + TALITOS_MDEUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_CRCUISR),
			in_be32(priv->reg + TALITOS_CRCUISR_LO));
		break;
	}

	/* dump the channel's view of the descriptor, 8 bytes per line */
	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_DESCBUF(ch) + 8*i),
			in_be32(priv->reg + TALITOS_DESCBUF_LO(ch) + 8*i));
}
486
487 /*
488  * recover from error interrupts
489  */
490 static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
491 {
492         struct device *dev = (struct device *)data;
493         struct talitos_private *priv = dev_get_drvdata(dev);
494         unsigned int timeout = TALITOS_TIMEOUT;
495         int ch, error, reset_dev = 0, reset_ch = 0;
496         u32 v, v_lo;
497
498         for (ch = 0; ch < priv->num_channels; ch++) {
499                 /* skip channels without errors */
500                 if (!(isr & (1 << (ch * 2 + 1))))
501                         continue;
502
503                 error = -EINVAL;
504
505                 v = in_be32(priv->reg + TALITOS_CCPSR(ch));
506                 v_lo = in_be32(priv->reg + TALITOS_CCPSR_LO(ch));
507
508                 if (v_lo & TALITOS_CCPSR_LO_DOF) {
509                         dev_err(dev, "double fetch fifo overflow error\n");
510                         error = -EAGAIN;
511                         reset_ch = 1;
512                 }
513                 if (v_lo & TALITOS_CCPSR_LO_SOF) {
514                         /* h/w dropped descriptor */
515                         dev_err(dev, "single fetch fifo overflow error\n");
516                         error = -EAGAIN;
517                 }
518                 if (v_lo & TALITOS_CCPSR_LO_MDTE)
519                         dev_err(dev, "master data transfer error\n");
520                 if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
521                         dev_err(dev, "s/g data length zero error\n");
522                 if (v_lo & TALITOS_CCPSR_LO_FPZ)
523                         dev_err(dev, "fetch pointer zero error\n");
524                 if (v_lo & TALITOS_CCPSR_LO_IDH)
525                         dev_err(dev, "illegal descriptor header error\n");
526                 if (v_lo & TALITOS_CCPSR_LO_IEU)
527                         dev_err(dev, "invalid execution unit error\n");
528                 if (v_lo & TALITOS_CCPSR_LO_EU)
529                         report_eu_error(dev, ch, current_desc(dev, ch));
530                 if (v_lo & TALITOS_CCPSR_LO_GB)
531                         dev_err(dev, "gather boundary error\n");
532                 if (v_lo & TALITOS_CCPSR_LO_GRL)
533                         dev_err(dev, "gather return/length error\n");
534                 if (v_lo & TALITOS_CCPSR_LO_SB)
535                         dev_err(dev, "scatter boundary error\n");
536                 if (v_lo & TALITOS_CCPSR_LO_SRL)
537                         dev_err(dev, "scatter return/length error\n");
538
539                 flush_channel(dev, ch, error, reset_ch);
540
541                 if (reset_ch) {
542                         reset_channel(dev, ch);
543                 } else {
544                         setbits32(priv->reg + TALITOS_CCCR(ch),
545                                   TALITOS_CCCR_CONT);
546                         setbits32(priv->reg + TALITOS_CCCR_LO(ch), 0);
547                         while ((in_be32(priv->reg + TALITOS_CCCR(ch)) &
548                                TALITOS_CCCR_CONT) && --timeout)
549                                 cpu_relax();
550                         if (timeout == 0) {
551                                 dev_err(dev, "failed to restart channel %d\n",
552                                         ch);
553                                 reset_dev = 1;
554                         }
555                 }
556         }
557         if (reset_dev || isr & ~TALITOS_ISR_CHERR || isr_lo) {
558                 dev_err(dev, "done overflow, internal time out, or rngu error: "
559                         "ISR 0x%08x_%08x\n", isr, isr_lo);
560
561                 /* purge request queues */
562                 for (ch = 0; ch < priv->num_channels; ch++)
563                         flush_channel(dev, ch, -EIO, 1);
564
565                 /* reset and reinitialize the device */
566                 init_device(dev);
567         }
568 }
569
/* top-half IRQ handler: acknowledge all pending status, then dispatch
 * to the error path or schedule the done tasklet */
static irqreturn_t talitos_interrupt(int irq, void *data)
{
	struct device *dev = data;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 isr, isr_lo;

	isr = in_be32(priv->reg + TALITOS_ISR);
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);
	/* Acknowledge interrupt */
	out_be32(priv->reg + TALITOS_ICR, isr);
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);

	/* any bit other than channel-done indicates an error condition */
	if (unlikely((isr & ~TALITOS_ISR_CHDONE) || isr_lo))
		talitos_error((unsigned long)data, isr, isr_lo);
	else
		if (likely(isr & TALITOS_ISR_CHDONE)) {
			/* mask further done interrupts. */
			clrbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_DONE);
			/* done_task will unmask done interrupts at exit */
			tasklet_schedule(&priv->done_task);
		}

	return (isr || isr_lo) ? IRQ_HANDLED : IRQ_NONE;
}
594
595 /*
596  * hwrng
597  */
598 static int talitos_rng_data_present(struct hwrng *rng, int wait)
599 {
600         struct device *dev = (struct device *)rng->priv;
601         struct talitos_private *priv = dev_get_drvdata(dev);
602         u32 ofl;
603         int i;
604
605         for (i = 0; i < 20; i++) {
606                 ofl = in_be32(priv->reg + TALITOS_RNGUSR_LO) &
607                       TALITOS_RNGUSR_LO_OFL;
608                 if (ofl || !wait)
609                         break;
610                 udelay(10);
611         }
612
613         return !!ofl;
614 }
615
static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses: both halves must be read,
	 * but only the second (low) word is handed back to hwrng */
	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO);
	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO_LO);

	return sizeof(u32);
}
627
628 static int talitos_rng_init(struct hwrng *rng)
629 {
630         struct device *dev = (struct device *)rng->priv;
631         struct talitos_private *priv = dev_get_drvdata(dev);
632         unsigned int timeout = TALITOS_TIMEOUT;
633
634         setbits32(priv->reg + TALITOS_RNGURCR_LO, TALITOS_RNGURCR_LO_SR);
635         while (!(in_be32(priv->reg + TALITOS_RNGUSR_LO) & TALITOS_RNGUSR_LO_RD)
636                && --timeout)
637                 cpu_relax();
638         if (timeout == 0) {
639                 dev_err(dev, "failed to reset rng hw\n");
640                 return -ENODEV;
641         }
642
643         /* start generating */
644         setbits32(priv->reg + TALITOS_RNGUDSR_LO, 0);
645
646         return 0;
647 }
648
649 static int talitos_register_rng(struct device *dev)
650 {
651         struct talitos_private *priv = dev_get_drvdata(dev);
652
653         priv->rng.name          = dev_driver_string(dev),
654         priv->rng.init          = talitos_rng_init,
655         priv->rng.data_present  = talitos_rng_data_present,
656         priv->rng.data_read     = talitos_rng_data_read,
657         priv->rng.priv          = (unsigned long)dev;
658
659         return hwrng_register(&priv->rng);
660 }
661
/* undo talitos_register_rng(): detach this SEC's RNGU from the hwrng core */
static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	hwrng_unregister(&priv->rng);
}
668
/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
#define TALITOS_MAX_KEY_SIZE		64
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

#define MD5_DIGEST_SIZE   16

/* per-transform (tfm) context; key[] holds the combined authenc blob
 * (authkeylen + enckeylen bytes) copied in by aead_setkey() */
struct talitos_ctx {
	struct device *dev;		/* SEC device servicing this tfm */
	__be32 desc_hdr_template;	/* pre-built descriptor header bits */
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	unsigned int keylen;		/* bytes of key[] in use */
	unsigned int enckeylen;
	unsigned int authkeylen;
	unsigned int authsize;		/* ICV length, set via setauthsize */
};
688
689 static int aead_setauthsize(struct crypto_aead *authenc,
690                             unsigned int authsize)
691 {
692         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
693
694         ctx->authsize = authsize;
695
696         return 0;
697 }
698
/*
 * parse and store an authenc-format key: an rtattr-wrapped
 * crypto_authenc_key_param (carrying enckeylen) followed by the
 * concatenated authentication + encryption key material
 */
static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct rtattr *rta = (void *)key;
	struct crypto_authenc_key_param *param;
	unsigned int authkeylen;
	unsigned int enckeylen;

	if (!RTA_OK(rta, keylen))
		goto badkey;

	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		goto badkey;

	if (RTA_PAYLOAD(rta) < sizeof(*param))
		goto badkey;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);

	/* skip past the parameter header to the key material */
	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);

	if (keylen < enckeylen)
		goto badkey;

	authkeylen = keylen - enckeylen;

	/* bound check before copying into the fixed-size ctx->key buffer */
	if (keylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	memcpy(&ctx->key, key, keylen);

	ctx->keylen = keylen;
	ctx->enckeylen = enckeylen;
	ctx->authkeylen = authkeylen;

	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
743
/*
 * talitos_edesc - s/w-extended descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @src_is_chained: input scatterlist is a chained (linked) sg list
 * @dst_is_chained: output scatterlist is a chained (linked) sg list
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int src_nents;
	int dst_nents;
	int src_is_chained;
	int dst_is_chained;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	struct talitos_ptr link_tbl[0];	/* variable-length trailing array */
};
767
768 static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
769                           unsigned int nents, enum dma_data_direction dir,
770                           int chained)
771 {
772         if (unlikely(chained))
773                 while (sg) {
774                         dma_map_sg(dev, sg, 1, dir);
775                         sg = scatterwalk_sg_next(sg);
776                 }
777         else
778                 dma_map_sg(dev, sg, nents, dir);
779         return nents;
780 }
781
782 static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
783                                    enum dma_data_direction dir)
784 {
785         while (sg) {
786                 dma_unmap_sg(dev, sg, 1, dir);
787                 sg = scatterwalk_sg_next(sg);
788         }
789 }
790
791 static void talitos_sg_unmap(struct device *dev,
792                              struct talitos_edesc *edesc,
793                              struct scatterlist *src,
794                              struct scatterlist *dst)
795 {
796         unsigned int src_nents = edesc->src_nents ? : 1;
797         unsigned int dst_nents = edesc->dst_nents ? : 1;
798
799         if (src != dst) {
800                 if (edesc->src_is_chained)
801                         talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
802                 else
803                         dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
804
805                 if (edesc->dst_is_chained)
806                         talitos_unmap_sg_chain(dev, dst, DMA_FROM_DEVICE);
807                 else
808                         dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
809         } else
810                 if (edesc->src_is_chained)
811                         talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
812                 else
813                         dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
814 }
815
/* undo all dma mappings made while building an ipsec_esp descriptor */
static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	/* single-mapped descriptor pointers; NOTE(review): the role of each
	 * ptr[] slot is assigned by the descriptor build code (not in this
	 * chunk) — confirm indices against ipsec_esp() */
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	dma_unmap_sg(dev, areq->assoc, 1, DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	/* link table was mapped only when one was needed */
	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}
833
/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct talitos_edesc *edesc =
		 container_of(desc, struct talitos_edesc, desc);
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct scatterlist *sg;
	void *icvdata;

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->dma_len) {
		/* NOTE(review): the ICV is read from src_nents + dst_nents + 2
		 * entries into link_tbl — this offset must match where the
		 * (not shown) descriptor setup stashed it; confirm there */
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2];
		sg = sg_last(areq->dst, edesc->dst_nents);
		/* append the ICV to the tail end of the last dst segment */
		memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
		       icvdata, ctx->authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}
864
/*
 * Completion callback for AEAD decryption when the ICV must be
 * checked in software: compare the incoming ICV stashed by
 * aead_decrypt() against the ICV the hardware appended to dst,
 * and fail the request with -EBADMSG on mismatch.
 */
static void ipsec_esp_decrypt_swauth_done(struct device *dev,
                                   struct talitos_desc *desc, void *context,
                                   int err)
{
        struct aead_request *req = context;
        struct talitos_edesc *edesc =
                 container_of(desc, struct talitos_edesc, desc);
        struct crypto_aead *authenc = crypto_aead_reqtfm(req);
        struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
        struct scatterlist *sg;
        void *icvdata;

        ipsec_esp_unmap(dev, edesc, req);

        if (!err) {
                /* auth check: stash location depends on whether a link
                 * table was allocated (mirrors aead_decrypt) */
                if (edesc->dma_len)
                        icvdata = &edesc->link_tbl[edesc->src_nents +
                                                   edesc->dst_nents + 2];
                else
                        icvdata = &edesc->link_tbl[0];

                sg = sg_last(req->dst, edesc->dst_nents ? : 1);
                err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
                             ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
        }

        kfree(edesc);

        aead_request_complete(req, err);
}
896
897 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
898                                    struct talitos_desc *desc, void *context,
899                                    int err)
900 {
901         struct aead_request *req = context;
902         struct talitos_edesc *edesc =
903                  container_of(desc, struct talitos_edesc, desc);
904
905         ipsec_esp_unmap(dev, edesc, req);
906
907         /* check ICV auth status */
908         if (!err)
909                 if ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
910                     DESC_HDR_LO_ICCR1_PASS)
911                         err = -EBADMSG;
912
913         kfree(edesc);
914
915         aead_request_complete(req, err);
916 }
917
918 /*
919  * convert scatterlist to SEC h/w link table format
920  * stop at cryptlen bytes
921  */
922 static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
923                            int cryptlen, struct talitos_ptr *link_tbl_ptr)
924 {
925         int n_sg = sg_count;
926
927         while (n_sg--) {
928                 link_tbl_ptr->ptr = cpu_to_be32(sg_dma_address(sg));
929                 link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
930                 link_tbl_ptr->j_extent = 0;
931                 link_tbl_ptr++;
932                 cryptlen -= sg_dma_len(sg);
933                 sg = scatterwalk_sg_next(sg);
934         }
935
936         /* adjust (decrease) last one (or two) entry's len to cryptlen */
937         link_tbl_ptr--;
938         while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
939                 /* Empty this entry, and move to previous one */
940                 cryptlen += be16_to_cpu(link_tbl_ptr->len);
941                 link_tbl_ptr->len = 0;
942                 sg_count--;
943                 link_tbl_ptr--;
944         }
945         link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
946                                         + cryptlen);
947
948         /* tag end of link table */
949         link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
950
951         return sg_count;
952 }
953
954 /*
955  * fill in and submit ipsec_esp descriptor
956  */
957 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
958                      u8 *giv, u64 seq,
959                      void (*callback) (struct device *dev,
960                                        struct talitos_desc *desc,
961                                        void *context, int error))
962 {
963         struct crypto_aead *aead = crypto_aead_reqtfm(areq);
964         struct talitos_ctx *ctx = crypto_aead_ctx(aead);
965         struct device *dev = ctx->dev;
966         struct talitos_desc *desc = &edesc->desc;
967         unsigned int cryptlen = areq->cryptlen;
968         unsigned int authsize = ctx->authsize;
969         unsigned int ivsize;
970         int sg_count, ret;
971         int sg_link_tbl_len;
972
973         /* hmac key */
974         map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
975                                0, DMA_TO_DEVICE);
976         /* hmac data */
977         map_single_talitos_ptr(dev, &desc->ptr[1], sg_virt(areq->src) -
978                                sg_virt(areq->assoc), sg_virt(areq->assoc), 0,
979                                DMA_TO_DEVICE);
980         /* cipher iv */
981         ivsize = crypto_aead_ivsize(aead);
982         map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0,
983                                DMA_TO_DEVICE);
984
985         /* cipher key */
986         map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
987                                (char *)&ctx->key + ctx->authkeylen, 0,
988                                DMA_TO_DEVICE);
989
990         /*
991          * cipher in
992          * map and adjust cipher len to aead request cryptlen.
993          * extent is bytes of HMAC postpended to ciphertext,
994          * typically 12 for ipsec
995          */
996         desc->ptr[4].len = cpu_to_be16(cryptlen);
997         desc->ptr[4].j_extent = authsize;
998
999         sg_count = talitos_map_sg(dev, areq->src,
1000                                   edesc->src_nents ? : 1,
1001                                   (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
1002                                       DMA_TO_DEVICE,
1003                                   edesc->src_is_chained);
1004
1005         if (sg_count == 1) {
1006                 desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
1007         } else {
1008                 sg_link_tbl_len = cryptlen;
1009
1010                 if ((edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) &&
1011                         (edesc->desc.hdr & DESC_HDR_MODE0_ENCRYPT) == 0) {
1012                         sg_link_tbl_len = cryptlen + authsize;
1013                 }
1014                 sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
1015                                           &edesc->link_tbl[0]);
1016                 if (sg_count > 1) {
1017                         desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1018                         desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl);
1019                         dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1020                                                    edesc->dma_len, DMA_BIDIRECTIONAL);
1021                 } else {
1022                         /* Only one segment now, so no link tbl needed */
1023                         desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
1024                 }
1025         }
1026
1027         /* cipher out */
1028         desc->ptr[5].len = cpu_to_be16(cryptlen);
1029         desc->ptr[5].j_extent = authsize;
1030
1031         if (areq->src != areq->dst) {
1032                 sg_count = talitos_map_sg(dev, areq->dst,
1033                                           edesc->dst_nents ? : 1,
1034                                           DMA_FROM_DEVICE,
1035                                           edesc->dst_is_chained);
1036         }
1037
1038         if (sg_count == 1) {
1039                 desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst));
1040         } else {
1041                 struct talitos_ptr *link_tbl_ptr =
1042                         &edesc->link_tbl[edesc->src_nents + 1];
1043
1044                 desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *)
1045                                                edesc->dma_link_tbl +
1046                                                edesc->src_nents + 1);
1047                 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
1048                                           link_tbl_ptr);
1049
1050                 /* Add an entry to the link table for ICV data */
1051                 link_tbl_ptr += sg_count - 1;
1052                 link_tbl_ptr->j_extent = 0;
1053                 sg_count++;
1054                 link_tbl_ptr++;
1055                 link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
1056                 link_tbl_ptr->len = cpu_to_be16(authsize);
1057
1058                 /* icv data follows link tables */
1059                 link_tbl_ptr->ptr = cpu_to_be32((struct talitos_ptr *)
1060                                                 edesc->dma_link_tbl +
1061                                                 edesc->src_nents +
1062                                                 edesc->dst_nents + 2);
1063
1064                 desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
1065                 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1066                                            edesc->dma_len, DMA_BIDIRECTIONAL);
1067         }
1068
1069         /* iv out */
1070         map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0,
1071                                DMA_FROM_DEVICE);
1072
1073         ret = talitos_submit(dev, desc, callback, areq);
1074         if (ret != -EINPROGRESS) {
1075                 ipsec_esp_unmap(dev, edesc, areq);
1076                 kfree(edesc);
1077         }
1078         return ret;
1079 }
1080
1081
1082 /*
1083  * derive number of elements in scatterlist
1084  */
1085 static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
1086 {
1087         struct scatterlist *sg = sg_list;
1088         int sg_nents = 0;
1089
1090         *chained = 0;
1091         while (nbytes > 0) {
1092                 sg_nents++;
1093                 nbytes -= sg->length;
1094                 if (!sg_is_last(sg) && (sg + 1)->length == 0)
1095                         *chained = 1;
1096                 sg = scatterwalk_sg_next(sg);
1097         }
1098
1099         return sg_nents;
1100 }
1101
1102 /*
1103  * allocate and map the extended descriptor
1104  */
1105 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1106                                                  struct scatterlist *src,
1107                                                  struct scatterlist *dst,
1108                                                  unsigned int cryptlen,
1109                                                  unsigned int authsize,
1110                                                  int icv_stashing,
1111                                                  u32 cryptoflags)
1112 {
1113         struct talitos_edesc *edesc;
1114         int src_nents, dst_nents, alloc_len, dma_len;
1115         int src_chained, dst_chained = 0;
1116         gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1117                       GFP_ATOMIC;
1118
1119         if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) {
1120                 dev_err(dev, "length exceeds h/w max limit\n");
1121                 return ERR_PTR(-EINVAL);
1122         }
1123
1124         src_nents = sg_count(src, cryptlen + authsize, &src_chained);
1125         src_nents = (src_nents == 1) ? 0 : src_nents;
1126
1127         if (dst == src) {
1128                 dst_nents = src_nents;
1129         } else {
1130                 dst_nents = sg_count(dst, cryptlen + authsize, &dst_chained);
1131                 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1132         }
1133
1134         /*
1135          * allocate space for base edesc plus the link tables,
1136          * allowing for two separate entries for ICV and generated ICV (+ 2),
1137          * and the ICV data itself
1138          */
1139         alloc_len = sizeof(struct talitos_edesc);
1140         if (src_nents || dst_nents) {
1141                 dma_len = (src_nents + dst_nents + 2) *
1142                                  sizeof(struct talitos_ptr) + authsize;
1143                 alloc_len += dma_len;
1144         } else {
1145                 dma_len = 0;
1146                 alloc_len += icv_stashing ? authsize : 0;
1147         }
1148
1149         edesc = kmalloc(alloc_len, GFP_DMA | flags);
1150         if (!edesc) {
1151                 dev_err(dev, "could not allocate edescriptor\n");
1152                 return ERR_PTR(-ENOMEM);
1153         }
1154
1155         edesc->src_nents = src_nents;
1156         edesc->dst_nents = dst_nents;
1157         edesc->src_is_chained = src_chained;
1158         edesc->dst_is_chained = dst_chained;
1159         edesc->dma_len = dma_len;
1160         edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1161                                              edesc->dma_len, DMA_BIDIRECTIONAL);
1162
1163         return edesc;
1164 }
1165
1166 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq,
1167                                               int icv_stashing)
1168 {
1169         struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1170         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1171
1172         return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1173                                    areq->cryptlen, ctx->authsize, icv_stashing,
1174                                    areq->base.flags);
1175 }
1176
/*
 * AEAD encrypt entry point: allocate an extended descriptor, select
 * the outbound (encrypt) descriptor header and submit via ipsec_esp().
 */
static int aead_encrypt(struct aead_request *req)
{
        struct crypto_aead *authenc = crypto_aead_reqtfm(req);
        struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
        struct talitos_edesc *edesc;

        /* allocate extended descriptor */
        edesc = aead_edesc_alloc(req, 0);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);

        /* set encrypt */
        edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

        return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done);
}
1193
1194
1195
/*
 * AEAD decrypt entry point.  Uses the SEC's hardware ICV check when
 * the device supports it for this request layout; otherwise stashes
 * the incoming ICV in the edesc so ipsec_esp_decrypt_swauth_done()
 * can compare it against the generated ICV in software.
 */
static int aead_decrypt(struct aead_request *req)
{
        struct crypto_aead *authenc = crypto_aead_reqtfm(req);
        struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
        unsigned int authsize = ctx->authsize;
        struct talitos_private *priv = dev_get_drvdata(ctx->dev);
        struct talitos_edesc *edesc;
        struct scatterlist *sg;
        void *icvdata;

        /* incoming cryptlen includes the trailing ICV; strip it */
        req->cryptlen -= authsize;

        /* allocate extended descriptor (with ICV stash space) */
        edesc = aead_edesc_alloc(req, 1);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);

        if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
            (((!edesc->src_nents && !edesc->dst_nents) ||
                priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT))) {

                /* decrypt and check the ICV */
                edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND |
                                  DESC_HDR_MODE1_MDEU_CICV;

                /* reset integrity check result bits */
                edesc->desc.hdr_lo = 0;

                return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_hwauth_done);

        } else {

                /* Have to check the ICV with software */

                edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

                /* stash incoming ICV for later cmp with ICV generated by the h/w */
                if (edesc->dma_len)
                        icvdata = &edesc->link_tbl[edesc->src_nents +
                                                   edesc->dst_nents + 2];
                else
                        icvdata = &edesc->link_tbl[0];

                sg = sg_last(req->src, edesc->src_nents ? : 1);

                memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
                       ctx->authsize);

                return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_swauth_done);
        }
}
1247
/*
 * AEAD givencrypt entry point: derive the IV from the per-tfm base
 * IV XORed with the request sequence number (so consecutive packets
 * don't go out with the same IV), then encrypt as usual.
 */
static int aead_givencrypt(struct aead_givcrypt_request *req)
{
        struct aead_request *areq = &req->areq;
        struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
        struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
        struct talitos_edesc *edesc;

        /* allocate extended descriptor */
        edesc = aead_edesc_alloc(areq, 0);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);

        /* set encrypt */
        edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

        memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
        /* avoid consecutive packets going out with same IV */
        *(__be64 *)req->giv ^= cpu_to_be64(req->seq);

        return ipsec_esp(edesc, areq, req->giv, req->seq,
                         ipsec_esp_encrypt_done);
}
1270
1271 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1272                              const u8 *key, unsigned int keylen)
1273 {
1274         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1275         struct ablkcipher_alg *alg = crypto_ablkcipher_alg(cipher);
1276
1277         if (keylen > TALITOS_MAX_KEY_SIZE)
1278                 goto badkey;
1279
1280         if (keylen < alg->min_keysize || keylen > alg->max_keysize)
1281                 goto badkey;
1282
1283         memcpy(&ctx->key, key, keylen);
1284         ctx->keylen = keylen;
1285
1286         return 0;
1287
1288 badkey:
1289         crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1290         return -EINVAL;
1291 }
1292
/*
 * Undo the DMA mappings set up by common_nonsnoop(): the single
 * mappings (ptr[1] iv in, ptr[2] key, ptr[5] iv out), the src/dst
 * scatterlists and, if one was allocated, the link table.
 */
static void common_nonsnoop_unmap(struct device *dev,
                                  struct talitos_edesc *edesc,
                                  struct ablkcipher_request *areq)
{
        unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
        unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
        unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

        talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

        if (edesc->dma_len)
                dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
                                 DMA_BIDIRECTIONAL);
}
1307
/*
 * Completion callback for ablkcipher requests: release DMA mappings
 * and the extended descriptor, then complete the request with the
 * hardware-reported status.
 */
static void ablkcipher_done(struct device *dev,
                            struct talitos_desc *desc, void *context,
                            int err)
{
        struct ablkcipher_request *areq = context;
        struct talitos_edesc *edesc =
                 container_of(desc, struct talitos_edesc, desc);

        common_nonsnoop_unmap(dev, edesc, areq);

        kfree(edesc);

        areq->base.complete(&areq->base, err);
}
1322
/*
 * Fill in and submit a descriptor for an ablkcipher request.
 * Pointer map: [0] empty, [1] cipher iv, [2] cipher key,
 * [3] cipher in, [4] cipher out, [5] iv out, [6] empty.
 * On submission failure everything is unmapped and the edesc
 * freed here; otherwise the callback owns the cleanup.
 */
static int common_nonsnoop(struct talitos_edesc *edesc,
                           struct ablkcipher_request *areq,
                           u8 *giv,
                           void (*callback) (struct device *dev,
                                             struct talitos_desc *desc,
                                             void *context, int error))
{
        struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
        struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
        struct device *dev = ctx->dev;
        struct talitos_desc *desc = &edesc->desc;
        unsigned int cryptlen = areq->nbytes;
        unsigned int ivsize;
        int sg_count, ret;

        /* first DWORD empty */
        desc->ptr[0].len = 0;
        desc->ptr[0].ptr = 0;
        desc->ptr[0].j_extent = 0;

        /* cipher iv: generated IV takes precedence when supplied */
        ivsize = crypto_ablkcipher_ivsize(cipher);
        map_single_talitos_ptr(dev, &desc->ptr[1], ivsize, giv ?: areq->info, 0,
                               DMA_TO_DEVICE);

        /* cipher key */
        map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
                               (char *)&ctx->key, 0, DMA_TO_DEVICE);

        /*
         * cipher in
         */
        desc->ptr[3].len = cpu_to_be16(cryptlen);
        desc->ptr[3].j_extent = 0;

        sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
                                  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
                                                           : DMA_TO_DEVICE,
                                  edesc->src_is_chained);

        if (sg_count == 1) {
                /* single segment: point directly at the data */
                desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->src));
        } else {
                sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
                                          &edesc->link_tbl[0]);
                if (sg_count > 1) {
                        desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
                        desc->ptr[3].ptr = cpu_to_be32(edesc->dma_link_tbl);
                        dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
                                                   edesc->dma_len, DMA_BIDIRECTIONAL);
                } else {
                        /* Only one segment now, so no link tbl needed */
                        desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->src));
                }
        }

        /* cipher out */
        desc->ptr[4].len = cpu_to_be16(cryptlen);
        desc->ptr[4].j_extent = 0;

        if (areq->src != areq->dst)
                sg_count = talitos_map_sg(dev, areq->dst,
                                          edesc->dst_nents ? : 1,
                                          DMA_FROM_DEVICE,
                                          edesc->dst_is_chained);

        if (sg_count == 1) {
                desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->dst));
        } else {
                /* dst link table lives just past the src entries */
                struct talitos_ptr *link_tbl_ptr =
                        &edesc->link_tbl[edesc->src_nents + 1];

                desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
                desc->ptr[4].ptr = cpu_to_be32((struct talitos_ptr *)
                                               edesc->dma_link_tbl +
                                               edesc->src_nents + 1);
                sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
                                          link_tbl_ptr);
                dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
                                           edesc->dma_len, DMA_BIDIRECTIONAL);
        }

        /* iv out */
        map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 0,
                               DMA_FROM_DEVICE);

        /* last DWORD empty */
        desc->ptr[6].len = 0;
        desc->ptr[6].ptr = 0;
        desc->ptr[6].j_extent = 0;

        ret = talitos_submit(dev, desc, callback, areq);
        if (ret != -EINPROGRESS) {
                /* submit failed: callback will not run, clean up here */
                common_nonsnoop_unmap(dev, edesc, areq);
                kfree(edesc);
        }
        return ret;
}
1421
1422 static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *areq)
1423 {
1424         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1425         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1426
1427         return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, areq->nbytes,
1428                                    0, 0, areq->base.flags);
1429 }
1430
/*
 * ablkcipher encrypt entry point: allocate an extended descriptor,
 * select the outbound (encrypt) header and submit via common_nonsnoop().
 */
static int ablkcipher_encrypt(struct ablkcipher_request *areq)
{
        struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
        struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
        struct talitos_edesc *edesc;

        /* allocate extended descriptor */
        edesc = ablkcipher_edesc_alloc(areq);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);

        /* set encrypt */
        edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

        return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
}
1447
/*
 * ablkcipher decrypt entry point: allocate an extended descriptor,
 * select the inbound (decrypt) header and submit via common_nonsnoop().
 */
static int ablkcipher_decrypt(struct ablkcipher_request *areq)
{
        struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
        struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
        struct talitos_edesc *edesc;

        /* allocate extended descriptor */
        edesc = ablkcipher_edesc_alloc(areq);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);

        /* set decrypt (inbound direction) */
        edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

        return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
}
1463
/* pairs a crypto_alg registration with the preformed SEC descriptor
 * header that selects the execution units and modes driving it */
struct talitos_alg_template {
        struct crypto_alg alg;
        /* ready-made descriptor header for this algorithm */
        __be32 desc_hdr_template;
};
1468
1469 static struct talitos_alg_template driver_algs[] = {
1470         /* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
1471         {
1472                 .alg = {
1473                         .cra_name = "authenc(hmac(sha1),cbc(aes))",
1474                         .cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
1475                         .cra_blocksize = AES_BLOCK_SIZE,
1476                         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1477                         .cra_type = &crypto_aead_type,
1478                         .cra_aead = {
1479                                 .setkey = aead_setkey,
1480                                 .setauthsize = aead_setauthsize,
1481                                 .encrypt = aead_encrypt,
1482                                 .decrypt = aead_decrypt,
1483                                 .givencrypt = aead_givencrypt,
1484                                 .geniv = "<built-in>",
1485                                 .ivsize = AES_BLOCK_SIZE,
1486                                 .maxauthsize = SHA1_DIGEST_SIZE,
1487                         }
1488                 },
1489                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1490                                      DESC_HDR_SEL0_AESU |
1491                                      DESC_HDR_MODE0_AESU_CBC |
1492                                      DESC_HDR_SEL1_MDEUA |
1493                                      DESC_HDR_MODE1_MDEU_INIT |
1494                                      DESC_HDR_MODE1_MDEU_PAD |
1495                                      DESC_HDR_MODE1_MDEU_SHA1_HMAC,
1496         },
1497         {
1498                 .alg = {
1499                         .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
1500                         .cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
1501                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1502                         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1503                         .cra_type = &crypto_aead_type,
1504                         .cra_aead = {
1505                                 .setkey = aead_setkey,
1506                                 .setauthsize = aead_setauthsize,
1507                                 .encrypt = aead_encrypt,
1508                                 .decrypt = aead_decrypt,
1509                                 .givencrypt = aead_givencrypt,
1510                                 .geniv = "<built-in>",
1511                                 .ivsize = DES3_EDE_BLOCK_SIZE,
1512                                 .maxauthsize = SHA1_DIGEST_SIZE,
1513                         }
1514                 },
1515                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1516                                      DESC_HDR_SEL0_DEU |
1517                                      DESC_HDR_MODE0_DEU_CBC |
1518                                      DESC_HDR_MODE0_DEU_3DES |
1519                                      DESC_HDR_SEL1_MDEUA |
1520                                      DESC_HDR_MODE1_MDEU_INIT |
1521                                      DESC_HDR_MODE1_MDEU_PAD |
1522                                      DESC_HDR_MODE1_MDEU_SHA1_HMAC,
1523         },
1524         {
1525                 .alg = {
1526                         .cra_name = "authenc(hmac(sha256),cbc(aes))",
1527                         .cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
1528                         .cra_blocksize = AES_BLOCK_SIZE,
1529                         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1530                         .cra_type = &crypto_aead_type,
1531                         .cra_aead = {
1532                                 .setkey = aead_setkey,
1533                                 .setauthsize = aead_setauthsize,
1534                                 .encrypt = aead_encrypt,
1535                                 .decrypt = aead_decrypt,
1536                                 .givencrypt = aead_givencrypt,
1537                                 .geniv = "<built-in>",
1538                                 .ivsize = AES_BLOCK_SIZE,
1539                                 .maxauthsize = SHA256_DIGEST_SIZE,
1540                         }
1541                 },
1542                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1543                                      DESC_HDR_SEL0_AESU |
1544                                      DESC_HDR_MODE0_AESU_CBC |
1545                                      DESC_HDR_SEL1_MDEUA |
1546                                      DESC_HDR_MODE1_MDEU_INIT |
1547                                      DESC_HDR_MODE1_MDEU_PAD |
1548                                      DESC_HDR_MODE1_MDEU_SHA256_HMAC,
1549         },
1550         {
1551                 .alg = {
1552                         .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
1553                         .cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
1554                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1555                         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1556                         .cra_type = &crypto_aead_type,
1557                         .cra_aead = {
1558                                 .setkey = aead_setkey,
1559                                 .setauthsize = aead_setauthsize,
1560                                 .encrypt = aead_encrypt,
1561                                 .decrypt = aead_decrypt,
1562                                 .givencrypt = aead_givencrypt,
1563                                 .geniv = "<built-in>",
1564                                 .ivsize = DES3_EDE_BLOCK_SIZE,
1565                                 .maxauthsize = SHA256_DIGEST_SIZE,
1566                         }
1567                 },
1568                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1569                                      DESC_HDR_SEL0_DEU |
1570                                      DESC_HDR_MODE0_DEU_CBC |
1571                                      DESC_HDR_MODE0_DEU_3DES |
1572                                      DESC_HDR_SEL1_MDEUA |
1573                                      DESC_HDR_MODE1_MDEU_INIT |
1574                                      DESC_HDR_MODE1_MDEU_PAD |
1575                                      DESC_HDR_MODE1_MDEU_SHA256_HMAC,
1576         },
1577         {
1578                 .alg = {
1579                         .cra_name = "authenc(hmac(md5),cbc(aes))",
1580                         .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
1581                         .cra_blocksize = AES_BLOCK_SIZE,
1582                         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1583                         .cra_type = &crypto_aead_type,
1584                         .cra_aead = {
1585                                 .setkey = aead_setkey,
1586                                 .setauthsize = aead_setauthsize,
1587                                 .encrypt = aead_encrypt,
1588                                 .decrypt = aead_decrypt,
1589                                 .givencrypt = aead_givencrypt,
1590                                 .geniv = "<built-in>",
1591                                 .ivsize = AES_BLOCK_SIZE,
1592                                 .maxauthsize = MD5_DIGEST_SIZE,
1593                         }
1594                 },
1595                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1596                                      DESC_HDR_SEL0_AESU |
1597                                      DESC_HDR_MODE0_AESU_CBC |
1598                                      DESC_HDR_SEL1_MDEUA |
1599                                      DESC_HDR_MODE1_MDEU_INIT |
1600                                      DESC_HDR_MODE1_MDEU_PAD |
1601                                      DESC_HDR_MODE1_MDEU_MD5_HMAC,
1602         },
1603         {
1604                 .alg = {
1605                         .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
1606                         .cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
1607                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1608                         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1609                         .cra_type = &crypto_aead_type,
1610                         .cra_aead = {
1611                                 .setkey = aead_setkey,
1612                                 .setauthsize = aead_setauthsize,
1613                                 .encrypt = aead_encrypt,
1614                                 .decrypt = aead_decrypt,
1615                                 .givencrypt = aead_givencrypt,
1616                                 .geniv = "<built-in>",
1617                                 .ivsize = DES3_EDE_BLOCK_SIZE,
1618                                 .maxauthsize = MD5_DIGEST_SIZE,
1619                         }
1620                 },
1621                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1622                                      DESC_HDR_SEL0_DEU |
1623                                      DESC_HDR_MODE0_DEU_CBC |
1624                                      DESC_HDR_MODE0_DEU_3DES |
1625                                      DESC_HDR_SEL1_MDEUA |
1626                                      DESC_HDR_MODE1_MDEU_INIT |
1627                                      DESC_HDR_MODE1_MDEU_PAD |
1628                                      DESC_HDR_MODE1_MDEU_MD5_HMAC,
1629         },
1630         /* ABLKCIPHER algorithms. */
1631         {
1632                 .alg = {
1633                         .cra_name = "cbc(aes)",
1634                         .cra_driver_name = "cbc-aes-talitos",
1635                         .cra_blocksize = AES_BLOCK_SIZE,
1636                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1637                                      CRYPTO_ALG_ASYNC,
1638                         .cra_type = &crypto_ablkcipher_type,
1639                         .cra_ablkcipher = {
1640                                 .setkey = ablkcipher_setkey,
1641                                 .encrypt = ablkcipher_encrypt,
1642                                 .decrypt = ablkcipher_decrypt,
1643                                 .geniv = "eseqiv",
1644                                 .min_keysize = AES_MIN_KEY_SIZE,
1645                                 .max_keysize = AES_MAX_KEY_SIZE,
1646                                 .ivsize = AES_BLOCK_SIZE,
1647                         }
1648                 },
1649                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
1650                                      DESC_HDR_SEL0_AESU |
1651                                      DESC_HDR_MODE0_AESU_CBC,
1652         },
1653         {
1654                 .alg = {
1655                         .cra_name = "cbc(des3_ede)",
1656                         .cra_driver_name = "cbc-3des-talitos",
1657                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1658                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1659                                      CRYPTO_ALG_ASYNC,
1660                         .cra_type = &crypto_ablkcipher_type,
1661                         .cra_ablkcipher = {
1662                                 .setkey = ablkcipher_setkey,
1663                                 .encrypt = ablkcipher_encrypt,
1664                                 .decrypt = ablkcipher_decrypt,
1665                                 .geniv = "eseqiv",
1666                                 .min_keysize = DES3_EDE_KEY_SIZE,
1667                                 .max_keysize = DES3_EDE_KEY_SIZE,
1668                                 .ivsize = DES3_EDE_BLOCK_SIZE,
1669                         }
1670                 },
1671                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
1672                                      DESC_HDR_SEL0_DEU |
1673                                      DESC_HDR_MODE0_DEU_CBC |
1674                                      DESC_HDR_MODE0_DEU_3DES,
1675         }
1676 };
1677
/*
 * per-algorithm registration record: ties a registered crypto_alg back to
 * the talitos device and descriptor header template that service it
 */
struct talitos_crypto_alg {
	struct list_head entry;		/* node in talitos_private->alg_list */
	struct device *dev;		/* device this alg was registered on */
	__be32 desc_hdr_template;	/* SEC descriptor header for this alg */
	struct crypto_alg crypto_alg;	/* the alg as handed to the crypto API */
};
1684
1685 static int talitos_cra_init(struct crypto_tfm *tfm)
1686 {
1687         struct crypto_alg *alg = tfm->__crt_alg;
1688         struct talitos_crypto_alg *talitos_alg =
1689                  container_of(alg, struct talitos_crypto_alg, crypto_alg);
1690         struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
1691
1692         /* update context with ptr to dev */
1693         ctx->dev = talitos_alg->dev;
1694         /* copy descriptor header template value */
1695         ctx->desc_hdr_template = talitos_alg->desc_hdr_template;
1696
1697         /* random first IV */
1698         get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);
1699
1700         return 0;
1701 }
1702
1703 /*
1704  * given the alg's descriptor header template, determine whether descriptor
1705  * type and primary/secondary execution units required match the hw
1706  * capabilities description provided in the device tree node.
1707  */
1708 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
1709 {
1710         struct talitos_private *priv = dev_get_drvdata(dev);
1711         int ret;
1712
1713         ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
1714               (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
1715
1716         if (SECONDARY_EU(desc_hdr_template))
1717                 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
1718                               & priv->exec_units);
1719
1720         return ret;
1721 }
1722
/*
 * undo everything talitos_probe() set up.  Also invoked from probe's
 * err_out path, so each step must tolerate resources that were never
 * allocated (priv is kzalloc'ed, so unset members are NULL/zero).
 */
static int talitos_remove(struct of_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg, *n;
	int i;

	/* unregister and free every algorithm added to alg_list at probe */
	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}

	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
		talitos_unregister_rng(dev);

	/* kfree(NULL) is a no-op, so members left unallocated are safe */
	kfree(priv->submit_count);
	kfree(priv->tail);
	kfree(priv->head);

	/* per-channel request fifos first, then the pointer array itself */
	if (priv->fifo)
		for (i = 0; i < priv->num_channels; i++)
			kfree(priv->fifo[i]);

	kfree(priv->fifo);
	kfree(priv->head_lock);
	kfree(priv->tail_lock);

	/* probe resets priv->irq to NO_IRQ when request_irq() failed */
	if (priv->irq != NO_IRQ) {
		free_irq(priv->irq, dev);
		irq_dispose_mapping(priv->irq);
	}

	tasklet_kill(&priv->done_task);

	/* NOTE(review): reg may be NULL when reached via probe's error
	 * path before of_iomap() succeeded — confirm iounmap(NULL) is
	 * harmless on the target arch */
	iounmap(priv->reg);

	dev_set_drvdata(dev, NULL);

	kfree(priv);

	return 0;
}
1766
1767 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
1768                                                     struct talitos_alg_template
1769                                                            *template)
1770 {
1771         struct talitos_crypto_alg *t_alg;
1772         struct crypto_alg *alg;
1773
1774         t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
1775         if (!t_alg)
1776                 return ERR_PTR(-ENOMEM);
1777
1778         alg = &t_alg->crypto_alg;
1779         *alg = template->alg;
1780
1781         alg->cra_module = THIS_MODULE;
1782         alg->cra_init = talitos_cra_init;
1783         alg->cra_priority = TALITOS_CRA_PRIORITY;
1784         alg->cra_alignmask = 0;
1785         alg->cra_ctxsize = sizeof(struct talitos_ctx);
1786
1787         t_alg->desc_hdr_template = template->desc_hdr_template;
1788         t_alg->dev = dev;
1789
1790         return t_alg;
1791 }
1792
/*
 * probe: map device resources, read SEC capabilities from the device
 * tree, allocate per-channel request fifos and bookkeeping, reset the
 * hardware, then register the hwrng and every crypto algorithm the
 * device supports.  Any failure jumps to err_out, where
 * talitos_remove() unwinds whatever had been set up so far.
 */
static int talitos_probe(struct of_device *ofdev,
                         const struct of_device_id *match)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->node;
	struct talitos_private *priv;
	const unsigned int *prop;
	int i, err;

	/* zeroed allocation: err_out relies on unset members being NULL */
	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	tasklet_init(&priv->done_task, talitos_done, (unsigned long)dev);

	INIT_LIST_HEAD(&priv->alg_list);

	priv->irq = irq_of_parse_and_map(np, 0);

	if (priv->irq == NO_IRQ) {
		dev_err(dev, "failed to map irq\n");
		err = -EINVAL;
		goto err_out;
	}

	/* get the irq line */
	err = request_irq(priv->irq, talitos_interrupt, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request irq %d\n", priv->irq);
		irq_dispose_mapping(priv->irq);
		/* mark unused so talitos_remove() skips irq teardown */
		priv->irq = NO_IRQ;
		goto err_out;
	}

	priv->reg = of_iomap(np, 0);
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	prop = of_get_property(np, "fsl,num-channels", NULL);
	if (prop)
		priv->num_channels = *prop;

	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
	if (prop)
		priv->chfifo_len = *prop;

	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
	if (prop)
		priv->exec_units = *prop;

	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
	if (prop)
		priv->desc_types = *prop;

	/* all four properties are mandatory; num_channels must be 2^n */
	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}

	/* per-revision hardware feature flags */
	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK;

	/* one head lock and one tail lock per channel */
	priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
				  GFP_KERNEL);
	priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
				  GFP_KERNEL);
	if (!priv->head_lock || !priv->tail_lock) {
		dev_err(dev, "failed to allocate fifo locks\n");
		err = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < priv->num_channels; i++) {
		spin_lock_init(&priv->head_lock[i]);
		spin_lock_init(&priv->tail_lock[i]);
	}

	/* array of per-channel request fifo pointers */
	priv->fifo = kmalloc(sizeof(struct talitos_request *) *
			     priv->num_channels, GFP_KERNEL);
	if (!priv->fifo) {
		dev_err(dev, "failed to allocate request fifo\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* round the advertised fifo depth up to a power of two */
	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->fifo[i] = kzalloc(sizeof(struct talitos_request) *
					priv->fifo_len, GFP_KERNEL);
		if (!priv->fifo[i]) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}
	}

	priv->submit_count = kmalloc(sizeof(atomic_t) * priv->num_channels,
				     GFP_KERNEL);
	if (!priv->submit_count) {
		dev_err(dev, "failed to allocate fifo submit count space\n");
		err = -ENOMEM;
		goto err_out;
	}
	/* NOTE(review): counters start at -(chfifo_len - 1); presumably
	 * track free slots per channel — confirm against the submit path */
	for (i = 0; i < priv->num_channels; i++)
		atomic_set(&priv->submit_count[i], -(priv->chfifo_len - 1));

	/* per-channel head/tail request indices, zero-initialized */
	priv->head = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
	priv->tail = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
	if (!priv->head || !priv->tail) {
		dev_err(dev, "failed to allocate request index space\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				goto err_out;
			}

			/* a single alg failing to register is not fatal:
			 * free it and carry on with the rest */
			err = crypto_register_alg(&t_alg->crypto_alg);
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					t_alg->crypto_alg.cra_driver_name);
				kfree(t_alg);
			} else {
				list_add_tail(&t_alg->entry, &priv->alg_list);
				dev_info(dev, "%s\n",
					 t_alg->crypto_alg.cra_driver_name);
			}
		}
	}

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}
1970
/* device tree match table: binds this driver to SEC 2.0-compatible nodes */
static struct of_device_id talitos_match[] = {
	{
		.compatible = "fsl,sec2.0",
	},
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);
1978
/* OF platform driver glue */
static struct of_platform_driver talitos_driver = {
	.name = "talitos",
	.match_table = talitos_match,
	.probe = talitos_probe,
	.remove = talitos_remove,
};
1985
1986 static int __init talitos_init(void)
1987 {
1988         return of_register_platform_driver(&talitos_driver);
1989 }
1990 module_init(talitos_init);
1991
/* module exit point: unregister the driver from the OF platform bus */
static void __exit talitos_exit(void)
{
	of_unregister_platform_driver(&talitos_driver);
}
module_exit(talitos_exit);
1997
/* module metadata */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");