/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
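
/*
 * For orientation only (an illustrative sketch, not additional driver
 * logic): using the desc_constr.h helpers employed throughout this file,
 * a job descriptor of the shape drawn above is built roughly as
 *
 *	init_job_desc_shared(desc, sh_desc_dma, desc_len(sh_desc),
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_out_ptr(desc, dst_dma, out_len, out_options);
 *	append_seq_in_ptr(desc, src_dma, in_len, in_options);
 *
 * where dst_dma/src_dma and the lengths/options are per-request values;
 * see init_aead_job() and init_ablkcipher_job() below for the real thing.
 */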

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY               3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE               (AES_MAX_KEY_SIZE + \
                                         SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH              16

/* length of descriptors text */
#define DESC_JOB_IO_LEN                 (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)

#define DESC_AEAD_BASE                  (4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN               (DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN               (DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN            (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE            (3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN         (DESC_ABLKCIPHER_BASE + \
                                         20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN         (DESC_ABLKCIPHER_BASE + \
                                         15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES             (DESC_AEAD_GIVENC_LEN + \
                                         CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN               (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define xstr(s) str(s)
#define str(s) #s
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
        u32 *jump_cmd, *uncond_jump_cmd;

        jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
        append_operation(desc, type | OP_ALG_AS_INITFINAL |
                         OP_ALG_DECRYPT);
        uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
        set_jump_tgt_here(desc, jump_cmd);
        append_operation(desc, type | OP_ALG_AS_INITFINAL |
                         OP_ALG_DECRYPT | OP_ALG_AAI_DK);
        set_jump_tgt_here(desc, uncond_jump_cmd);
}
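
/*
 * Note on the pattern above: append_jump() emits a JUMP command whose
 * target is not yet known and returns a pointer to it; once the descriptor
 * has grown to the intended target, set_jump_tgt_here() patches the saved
 * command to point at the current end of the descriptor. Schematically
 * (illustrative only):
 *
 *	u32 *j = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
 *	... commands run only when the condition is NOT met ...
 *	set_jump_tgt_here(desc, j);	// condition met: resume here
 */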

/*
 * Wait for completion of class 1 key loading before allowing
 * error propagation
 */
static inline void append_dec_shr_done(u32 *desc)
{
        u32 *jump_cmd;

        jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
        set_jump_tgt_here(desc, jump_cmd);
        append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
                             KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * For aead encrypt and decrypt, read iv for both classes
 */
static inline void aead_append_ld_iv(u32 *desc, int ivsize)
{
        append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
                   LDST_CLASS_1_CCB | ivsize);
        append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
        append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
        append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
                             KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}
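
/*
 * The two MATH ADDs above copy SEQINLEN into the variable sequence
 * lengths by adding REG0, which these descriptors rely on to still read
 * as zero at this point (nothing has written it). In effect:
 *
 *	variable-seq-out-len = remaining seq-in-len;
 *	variable-seq-in-len  = remaining seq-in-len;
 *
 * so the VLF FIFO load and store that follow move exactly the whole
 * remaining payload.
 */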

/*
 * If all data, including src (with assoc and iv) or dst (with iv only) are
 * contiguous
 */
#define GIV_SRC_CONTIG          1
#define GIV_DST_CONTIG          (1 << 1)

/*
 * per-session context
 */
struct caam_ctx {
        struct device *jrdev;
        u32 sh_desc_enc[DESC_MAX_USED_LEN];
        u32 sh_desc_dec[DESC_MAX_USED_LEN];
        u32 sh_desc_givenc[DESC_MAX_USED_LEN];
        dma_addr_t sh_desc_enc_dma;
        dma_addr_t sh_desc_dec_dma;
        dma_addr_t sh_desc_givenc_dma;
        u32 class1_alg_type;
        u32 class2_alg_type;
        u32 alg_op;
        u8 key[CAAM_MAX_KEY_SIZE];
        dma_addr_t key_dma;
        unsigned int enckeylen;
        unsigned int split_key_len;
        unsigned int split_key_pad_len;
        unsigned int authsize;
};
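
/*
 * Layout of ctx->key in the aead case, as consumed by append_key_aead()
 * below (see also the memcpy in aead_setkey()):
 *
 *	0            split_key_len     split_key_pad_len
 *	| MDHA split key    | padding  | encryption key (enckeylen) |
 *
 * The class 2 (auth) key is loaded from offset 0 and the class 1
 * (cipher) key from offset split_key_pad_len, both out of the single
 * ctx->key_dma mapping.
 */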

static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
                            int keys_fit_inline)
{
        if (keys_fit_inline) {
                append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
                                  ctx->split_key_len, CLASS_2 |
                                  KEY_DEST_MDHA_SPLIT | KEY_ENC);
                append_key_as_imm(desc, (void *)ctx->key +
                                  ctx->split_key_pad_len, ctx->enckeylen,
                                  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
        } else {
                append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
                           KEY_DEST_MDHA_SPLIT | KEY_ENC);
                append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
                           ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
        }
}

static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
                                  int keys_fit_inline)
{
        u32 *key_jump_cmd;

        init_sh_desc(desc, HDR_SHARE_WAIT);

        /* Skip if already shared */
        key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
                                   JUMP_COND_SHRD);

        append_key_aead(desc, ctx, keys_fit_inline);

        set_jump_tgt_here(desc, key_jump_cmd);

        /* Propagate errors from shared to job descriptor */
        append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
        struct aead_tfm *tfm = &aead->base.crt_aead;
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
        bool keys_fit_inline = false;
        u32 *key_jump_cmd, *jump_cmd;
        u32 geniv, moveiv;
        u32 *desc;

        if (!ctx->enckeylen || !ctx->authsize)
                return 0;

        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
            ctx->split_key_pad_len + ctx->enckeylen <=
            CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        /* aead_encrypt shared descriptor */
        desc = ctx->sh_desc_enc;

        init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

        /* Class 2 operation */
        append_operation(desc, ctx->class2_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

        /* cryptlen = seqoutlen - authsize */
        append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

        /* assoclen + cryptlen = seqinlen - ivsize */
        append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

        /* assoclen = (assoclen + cryptlen) - cryptlen */
        append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

        /* read assoc before reading payload */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
                             KEY_VLF);
        aead_append_ld_iv(desc, tfm->ivsize);

        /* Class 1 operation */
        append_operation(desc, ctx->class1_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

        /* Read and write cryptlen bytes */
        append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
        append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
        aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

        /* Write ICV */
        append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
                         LDST_SRCDST_BYTE_CONTEXT);

        ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "aead enc shdesc@"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
            ctx->split_key_pad_len + ctx->enckeylen <=
            CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        desc = ctx->sh_desc_dec;

        /* aead_decrypt shared descriptor */
        init_sh_desc(desc, HDR_SHARE_WAIT);

        /* Skip if already shared */
        key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
                                   JUMP_COND_SHRD);

        append_key_aead(desc, ctx, keys_fit_inline);

        /* Only propagate error immediately if shared */
        jump_cmd = append_jump(desc, JUMP_TEST_ALL);
        set_jump_tgt_here(desc, key_jump_cmd);
        append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
        set_jump_tgt_here(desc, jump_cmd);

        /* Class 2 operation */
        append_operation(desc, ctx->class2_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

        /* assoclen + cryptlen = seqinlen - ivsize - authsize */
        append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
                                ctx->authsize + tfm->ivsize);
        /* assoclen = (assoclen + cryptlen) - cryptlen */
        append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
        append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

        /* read assoc before reading payload */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
                             KEY_VLF);

        aead_append_ld_iv(desc, tfm->ivsize);

        append_dec_op1(desc, ctx->class1_alg_type);

        /* Read and write cryptlen bytes */
        append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
        append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
        aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

        /* Load ICV */
        append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
                             FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
        append_dec_shr_done(desc);

        ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "aead dec shdesc@"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
            ctx->split_key_pad_len + ctx->enckeylen <=
            CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        /* aead_givencrypt shared descriptor */
        desc = ctx->sh_desc_givenc;

        init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

        /* Generate IV */
        geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
                NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
                NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
        append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
                            LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
        append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
        append_move(desc, MOVE_SRC_INFIFO |
                    MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
        append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

        /* Copy IV from class 1 context to output fifo */
        append_move(desc, MOVE_SRC_CLASS1CTX |
                    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));

        /* Return to encryption */
        append_operation(desc, ctx->class2_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

        /* ivsize + cryptlen = seqoutlen - authsize */
        append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

        /* assoclen = seqinlen - (ivsize + cryptlen) */
        append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

        /* read assoc before reading payload */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
                             KEY_VLF);

        /* Copy iv from class 1 ctx to class 2 fifo */
        moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
                 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
        append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
                            LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
        append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
                            LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

        /* Class 1 operation */
        append_operation(desc, ctx->class1_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

        /* Will write ivsize + cryptlen */
        append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

        /* No need to reload iv */
        append_seq_fifo_load(desc, tfm->ivsize,
                             FIFOLD_CLASS_SKIP);

        /* Will read cryptlen */
        append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
        aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

        /* Write ICV */
        append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
                         LDST_SRCDST_BYTE_CONTEXT);

        ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
                                                 desc_bytes(desc),
                                                 DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "aead givenc shdesc@"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
                            unsigned int authsize)
{
        struct caam_ctx *ctx = crypto_aead_ctx(authenc);

        ctx->authsize = authsize;
        aead_set_sh_desc(authenc);

        return 0;
}

static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
                              u32 authkeylen)
{
        return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
                             ctx->split_key_pad_len, key_in, authkeylen,
                             ctx->alg_op);
}

static int aead_setkey(struct crypto_aead *aead,
                       const u8 *key, unsigned int keylen)
{
        /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
        static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
        struct rtattr *rta = (void *)key;
        struct crypto_authenc_key_param *param;
        unsigned int authkeylen;
        unsigned int enckeylen;
        int ret = 0;

        param = RTA_DATA(rta);
        enckeylen = be32_to_cpu(param->enckeylen);

        key += RTA_ALIGN(rta->rta_len);
        keylen -= RTA_ALIGN(rta->rta_len);

        if (keylen < enckeylen)
                goto badkey;

        authkeylen = keylen - enckeylen;

        if (keylen > CAAM_MAX_KEY_SIZE)
                goto badkey;

        /* Pick class 2 key length from algorithm submask */
        ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
                                      OP_ALG_ALGSEL_SHIFT] * 2;
        ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

#ifdef DEBUG
        printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
               keylen, enckeylen, authkeylen);
        printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
               ctx->split_key_len, ctx->split_key_pad_len);
        print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

        ret = gen_split_aead_key(ctx, key, authkeylen);
        if (ret)
                goto badkey;

        /* append encryption key after the auth split key */
        memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);

        ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
                                      enckeylen, DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->key_dma)) {
                dev_err(jrdev, "unable to map key i/o memory\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
                       ctx->split_key_pad_len + enckeylen, 1);
#endif

        ctx->enckeylen = enckeylen;

        ret = aead_set_sh_desc(aead);
        if (ret) {
                dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
                                 enckeylen, DMA_TO_DEVICE);
        }

        return ret;
badkey:
        crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
}
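
/*
 * For reference, the authenc key blob parsed above follows the generic
 * crypto_authenc_key_param convention: an rtattr header whose payload is
 * the big-endian encryption key length, followed by the concatenated
 * authentication and encryption keys:
 *
 *	| rtattr | be32 enckeylen | auth key (authkeylen) | enc key |
 *
 * authkeylen is recovered as whatever remains once enckeylen is known.
 */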

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
                             const u8 *key, unsigned int keylen)
{
        struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
        struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
        struct device *jrdev = ctx->jrdev;
        int ret = 0;
        u32 *key_jump_cmd, *jump_cmd;
        u32 *desc;

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

        memcpy(ctx->key, key, keylen);
        ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
                                      DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->key_dma)) {
                dev_err(jrdev, "unable to map key i/o memory\n");
                return -ENOMEM;
        }
        ctx->enckeylen = keylen;

        /* ablkcipher_encrypt shared descriptor */
        desc = ctx->sh_desc_enc;
        init_sh_desc(desc, HDR_SHARE_WAIT);
        /* Skip if already shared */
        key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
                                   JUMP_COND_SHRD);

        /* Load class1 key only */
        append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
                          ctx->enckeylen, CLASS_1 |
                          KEY_DEST_CLASS_REG);

        set_jump_tgt_here(desc, key_jump_cmd);

        /* Propagate errors from shared to job descriptor */
        append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);

        /* Load iv */
        append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
                   LDST_CLASS_1_CCB | tfm->ivsize);

        /* Load operation */
        append_operation(desc, ctx->class1_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

        /* Perform operation */
        ablkcipher_append_src_dst(desc);

        ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "ablkcipher enc shdesc@"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif
        /* ablkcipher_decrypt shared descriptor */
        desc = ctx->sh_desc_dec;

        init_sh_desc(desc, HDR_SHARE_WAIT);
        /* Skip if already shared */
        key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
                                   JUMP_COND_SHRD);

        /* Load class1 key only */
        append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
                          ctx->enckeylen, CLASS_1 |
                          KEY_DEST_CLASS_REG);

        /* Only propagate error immediately if shared */
        jump_cmd = append_jump(desc, JUMP_TEST_ALL);
        set_jump_tgt_here(desc, key_jump_cmd);
        append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
        set_jump_tgt_here(desc, jump_cmd);

        /* load IV */
        append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
                   LDST_CLASS_1_CCB | tfm->ivsize);

        /* Choose operation */
        append_dec_op1(desc, ctx->class1_alg_type);

        /* Perform operation */
        ablkcipher_append_src_dst(desc);

        /* Wait for key to load before allowing propagating error */
        append_dec_shr_done(desc);

        ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "ablkcipher dec shdesc@"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        return ret;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor (variable length; must not exceed
 *           MAX_CAAM_DESCSIZE) followed by any referenced link tables
 */
struct aead_edesc {
        int assoc_nents;
        int src_nents;
        int dst_nents;
        dma_addr_t iv_dma;
        int sec4_sg_bytes;
        dma_addr_t sec4_sg_dma;
        struct sec4_sg_entry *sec4_sg;
        u32 hw_desc[0];
};
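
/*
 * hw_desc[] is a flexible array member: aead_edesc_alloc() below sizes
 * the allocation as sizeof(struct aead_edesc) + desc_bytes + sec4_sg_bytes,
 * so the job descriptor and its link tables live in the same kmalloc'd
 * block and are released with a single kfree() from the completion
 * callback.
 */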

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor (variable length; must not exceed
 *           MAX_CAAM_DESCSIZE) followed by any referenced link tables
 */
struct ablkcipher_edesc {
        int src_nents;
        int dst_nents;
        dma_addr_t iv_dma;
        int sec4_sg_bytes;
        dma_addr_t sec4_sg_dma;
        struct sec4_sg_entry *sec4_sg;
        u32 hw_desc[0];
};

static void caam_unmap(struct device *dev, struct scatterlist *src,
                       struct scatterlist *dst, int src_nents, int dst_nents,
                       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
                       int sec4_sg_bytes)
{
        if (unlikely(dst != src)) {
                dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
                dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
        } else {
                dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
        }

        if (iv_dma)
                dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
        if (sec4_sg_bytes)
                dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
                                 DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
                       struct aead_edesc *edesc,
                       struct aead_request *req)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        int ivsize = crypto_aead_ivsize(aead);

        dma_unmap_sg(dev, req->assoc, edesc->assoc_nents, DMA_TO_DEVICE);

        caam_unmap(dev, req->src, req->dst,
                   edesc->src_nents, edesc->dst_nents,
                   edesc->iv_dma, ivsize, edesc->sec4_sg_dma,
                   edesc->sec4_sg_bytes);
}

static void ablkcipher_unmap(struct device *dev,
                             struct ablkcipher_edesc *edesc,
                             struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

        caam_unmap(dev, req->src, req->dst,
                   edesc->src_nents, edesc->dst_nents,
                   edesc->iv_dma, ivsize, edesc->sec4_sg_dma,
                   edesc->sec4_sg_bytes);
}

static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
                              void *context)
{
        struct aead_request *req = context;
        struct aead_edesc *edesc;
#ifdef DEBUG
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        int ivsize = crypto_aead_ivsize(aead);

        dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

        edesc = (struct aead_edesc *)((char *)desc -
                 offsetof(struct aead_edesc, hw_desc));

        if (err) {
                char tmp[CAAM_ERROR_STR_MAX];

                dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
        }

        aead_unmap(jrdev, edesc, req);

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
                       req->assoclen, 1);
        print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
                       edesc->src_nents ? 100 : ivsize, 1);
        print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
                       edesc->src_nents ? 100 : req->cryptlen +
                       ctx->authsize + 4, 1);
#endif

        kfree(edesc);

        aead_request_complete(req, err);
}

static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
                              void *context)
{
        struct aead_request *req = context;
        struct aead_edesc *edesc;
#ifdef DEBUG
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        int ivsize = crypto_aead_ivsize(aead);

        dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

        edesc = (struct aead_edesc *)((char *)desc -
                 offsetof(struct aead_edesc, hw_desc));

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
                       ivsize, 1);
        print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
                       req->cryptlen, 1);
#endif

        if (err) {
                char tmp[CAAM_ERROR_STR_MAX];

                dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
        }

        aead_unmap(jrdev, edesc, req);

        /*
         * verify hw auth check passed else return -EBADMSG
         */
        if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
                err = -EBADMSG;

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4,
                       ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
                       sizeof(struct iphdr) + req->assoclen +
                       ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
                       ctx->authsize + 36, 1);
        if (!err && edesc->sec4_sg_bytes) {
                struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
                print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
                               DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
                               sg->length + ctx->authsize + 16, 1);
        }
#endif

        kfree(edesc);

        aead_request_complete(req, err);
}

static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
                                    void *context)
{
        struct ablkcipher_request *req = context;
        struct ablkcipher_edesc *edesc;
#ifdef DEBUG
        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

        dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

        edesc = (struct ablkcipher_edesc *)((char *)desc -
                 offsetof(struct ablkcipher_edesc, hw_desc));

        if (err) {
                char tmp[CAAM_ERROR_STR_MAX];

                dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
        }

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
                       edesc->src_nents > 1 ? 100 : ivsize, 1);
        print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
                       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

        ablkcipher_unmap(jrdev, edesc, req);
        kfree(edesc);

        ablkcipher_request_complete(req, err);
}

static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
                                    void *context)
{
        struct ablkcipher_request *req = context;
        struct ablkcipher_edesc *edesc;
#ifdef DEBUG
        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

        dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

        edesc = (struct ablkcipher_edesc *)((char *)desc -
                 offsetof(struct ablkcipher_edesc, hw_desc));
        if (err) {
                char tmp[CAAM_ERROR_STR_MAX];

                dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
        }

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
                       ivsize, 1);
        print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
                       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

        ablkcipher_unmap(jrdev, edesc, req);
        kfree(edesc);

        ablkcipher_request_complete(req, err);
}

/*
 * Fill in aead job descriptor
 */
static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
                          struct aead_edesc *edesc,
                          struct aead_request *req,
                          bool all_contig, bool encrypt)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        int ivsize = crypto_aead_ivsize(aead);
        int authsize = ctx->authsize;
        u32 *desc = edesc->hw_desc;
        u32 out_options = 0, in_options;
        dma_addr_t dst_dma, src_dma;
        int len, sec4_sg_index = 0;

#ifdef DEBUG
        debug("assoclen %d cryptlen %d authsize %d\n",
              req->assoclen, req->cryptlen, authsize);
        print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
                       req->assoclen, 1);
        print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
                       edesc->src_nents ? 100 : ivsize, 1);
        print_hex_dump(KERN_ERR, "src    @"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
                       edesc->src_nents ? 100 : req->cryptlen, 1);
        print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
                       desc_bytes(sh_desc), 1);
#endif

        len = desc_len(sh_desc);
        init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

        if (all_contig) {
                src_dma = sg_dma_address(req->assoc);
                in_options = 0;
        } else {
                src_dma = edesc->sec4_sg_dma;
                sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
                                 (edesc->src_nents ? : 1);
                in_options = LDST_SGF;
        }
        if (encrypt)
                append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
                                  req->cryptlen - authsize, in_options);
        else
                append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
                                  req->cryptlen, in_options);

        if (likely(req->src == req->dst)) {
                if (all_contig) {
                        dst_dma = sg_dma_address(req->src);
                } else {
                        dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
                                  ((edesc->assoc_nents ? : 1) + 1);
                        out_options = LDST_SGF;
                }
        } else {
                if (!edesc->dst_nents) {
                        dst_dma = sg_dma_address(req->dst);
                } else {
                        dst_dma = edesc->sec4_sg_dma +
                                  sec4_sg_index *
                                  sizeof(struct sec4_sg_entry);
                        out_options = LDST_SGF;
                }
        }
        if (encrypt)
                append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
        else
                append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
                                   out_options);
}
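
/*
 * Note: by the time init_aead_job() runs, aead_encrypt() has already added
 * ctx->authsize to req->cryptlen. That is why the encrypt path subtracts
 * authsize from the input length (the ICV is produced, not read) while the
 * decrypt path subtracts it from the output length (the ICV is read, not
 * written). The "x ? : 1" idiom maps a zero segment count, which this
 * driver uses to mean a single flat buffer, back to one link-table entry.
 */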

/*
 * Fill in aead givencrypt job descriptor
 */
static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
                              struct aead_edesc *edesc,
                              struct aead_request *req,
                              int contig)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        int ivsize = crypto_aead_ivsize(aead);
        int authsize = ctx->authsize;
        u32 *desc = edesc->hw_desc;
        u32 out_options = 0, in_options;
        dma_addr_t dst_dma, src_dma;
        int len, sec4_sg_index = 0;

#ifdef DEBUG
        debug("assoclen %d cryptlen %d authsize %d\n",
              req->assoclen, req->cryptlen, authsize);
        print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
                       req->assoclen, 1);
        print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
        print_hex_dump(KERN_ERR, "src    @"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
                       edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
        print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
                       desc_bytes(sh_desc), 1);
#endif

        len = desc_len(sh_desc);
        init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

        if (contig & GIV_SRC_CONTIG) {
                src_dma = sg_dma_address(req->assoc);
                in_options = 0;
        } else {
                src_dma = edesc->sec4_sg_dma;
                sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
                in_options = LDST_SGF;
        }
        append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
                          req->cryptlen - authsize, in_options);

        if (contig & GIV_DST_CONTIG) {
                dst_dma = edesc->iv_dma;
        } else {
                if (likely(req->src == req->dst)) {
                        dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
                                  edesc->assoc_nents;
                        out_options = LDST_SGF;
                } else {
                        dst_dma = edesc->sec4_sg_dma +
                                  sec4_sg_index *
                                  sizeof(struct sec4_sg_entry);
                        out_options = LDST_SGF;
                }
        }

        append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
}

/*
 * Fill in ablkcipher job descriptor
 */
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
                                struct ablkcipher_edesc *edesc,
                                struct ablkcipher_request *req,
                                bool iv_contig)
{
        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
        u32 *desc = edesc->hw_desc;
        u32 out_options = 0, in_options;
        dma_addr_t dst_dma, src_dma;
        int len, sec4_sg_index = 0;

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
                       ivsize, 1);
        print_hex_dump(KERN_ERR, "src    @"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
                       edesc->src_nents ? 100 : req->nbytes, 1);
#endif

        len = desc_len(sh_desc);
        init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

        if (iv_contig) {
                src_dma = edesc->iv_dma;
                in_options = 0;
        } else {
                src_dma = edesc->sec4_sg_dma;
                sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
                in_options = LDST_SGF;
        }
        append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);

        if (likely(req->src == req->dst)) {
                if (!edesc->src_nents && iv_contig) {
                        dst_dma = sg_dma_address(req->src);
                } else {
                        dst_dma = edesc->sec4_sg_dma +
                                sizeof(struct sec4_sg_entry);
                        out_options = LDST_SGF;
                }
        } else {
                if (!edesc->dst_nents) {
                        dst_dma = sg_dma_address(req->dst);
                } else {
                        dst_dma = edesc->sec4_sg_dma +
                                sec4_sg_index * sizeof(struct sec4_sg_entry);
                        out_options = LDST_SGF;
                }
        }
        append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}
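
/*
 * For ablkcipher the IV is prepended to the input sequence (hence the
 * req->nbytes + ivsize input length above): the shared descriptors built
 * in ablkcipher_setkey() begin with a SEQ LOAD of ivsize bytes into the
 * class 1 context, so the payload proper starts right after the IV in the
 * same sequence.
 */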

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
                                           int desc_bytes, bool *all_contig_ptr)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
        gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
                       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
        int assoc_nents, src_nents, dst_nents = 0;
        struct aead_edesc *edesc;
        dma_addr_t iv_dma = 0;
        int sgc;
        bool all_contig = true;
        int ivsize = crypto_aead_ivsize(aead);
        int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;

        assoc_nents = sg_count(req->assoc, req->assoclen);
        src_nents = sg_count(req->src, req->cryptlen);

        if (unlikely(req->dst != req->src))
                dst_nents = sg_count(req->dst, req->cryptlen);

        sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1,
                         DMA_BIDIRECTIONAL);
        if (likely(req->src == req->dst)) {
                sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
                                 DMA_BIDIRECTIONAL);
        } else {
                sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
                                 DMA_TO_DEVICE);
                sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
                                 DMA_FROM_DEVICE);
        }

        /* Check if data are contiguous */
        iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
        if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
            iv_dma || src_nents || iv_dma + ivsize !=
            sg_dma_address(req->src)) {
                all_contig = false;
                assoc_nents = assoc_nents ? : 1;
                src_nents = src_nents ? : 1;
                sec4_sg_len = assoc_nents + 1 + src_nents;
        }
        sec4_sg_len += dst_nents;

        sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

        /* allocate space for base edesc and hw desc commands, link tables */
        edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
                        sec4_sg_bytes, GFP_DMA | flags);
        if (!edesc) {
                dev_err(jrdev, "could not allocate extended descriptor\n");
                return ERR_PTR(-ENOMEM);
        }

        edesc->assoc_nents = assoc_nents;
        edesc->src_nents = src_nents;
        edesc->dst_nents = dst_nents;
        edesc->iv_dma = iv_dma;
        edesc->sec4_sg_bytes = sec4_sg_bytes;
        edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
                         desc_bytes;
        edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                            sec4_sg_bytes, DMA_TO_DEVICE);
        *all_contig_ptr = all_contig;

        sec4_sg_index = 0;
        if (!all_contig) {
                sg_to_sec4_sg(req->assoc,
                              (assoc_nents ? : 1),
                              edesc->sec4_sg +
                              sec4_sg_index, 0);
                sec4_sg_index += assoc_nents ? : 1;
                dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
                                   iv_dma, ivsize, 0);
                sec4_sg_index += 1;
                sg_to_sec4_sg_last(req->src,
                                   (src_nents ? : 1),
                                   edesc->sec4_sg +
                                   sec4_sg_index, 0);
                sec4_sg_index += src_nents ? : 1;
        }
        if (dst_nents) {
                sg_to_sec4_sg_last(req->dst, dst_nents,
                                   edesc->sec4_sg + sec4_sg_index, 0);
        }

        return edesc;
}
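
/*
 * When !all_contig, the link table built above has the layout the job
 * descriptor expects:
 *
 *	[ assoc entries | iv entry | src entries (last) | dst entries ]
 *
 * which is what the sec4_sg_index arithmetic in init_aead_job() indexes
 * into when computing src_dma and dst_dma.
 */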

static int aead_encrypt(struct aead_request *req)
{
        struct aead_edesc *edesc;
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
        bool all_contig;
        u32 *desc;
        int ret = 0;

        req->cryptlen += ctx->authsize;

        /* allocate extended descriptor */
        edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
                                 CAAM_CMD_SZ, &all_contig);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);

        /* Create and submit job descriptor */
        init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
                      all_contig, true);
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
                       desc_bytes(edesc->hw_desc), 1);
#endif

        desc = edesc->hw_desc;
        ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
        if (!ret) {
                ret = -EINPROGRESS;
        } else {
                aead_unmap(jrdev, edesc, req);
                kfree(edesc);
        }

        return ret;
}

static int aead_decrypt(struct aead_request *req)
{
        struct aead_edesc *edesc;
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
        bool all_contig;
        u32 *desc;
        int ret = 0;

        /* allocate extended descriptor */
        edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
                                 CAAM_CMD_SZ, &all_contig);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "dec src@"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
                       req->cryptlen, 1);
#endif

        /* Create and submit job descriptor */
        init_aead_job(ctx->sh_desc_dec,
                      ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
                       desc_bytes(edesc->hw_desc), 1);
#endif

        desc = edesc->hw_desc;
        ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
        if (!ret) {
                ret = -EINPROGRESS;
        } else {
                aead_unmap(jrdev, edesc, req);
                kfree(edesc);
        }

        return ret;
}
1290
1291 /*
1292  * allocate and map the aead extended descriptor for aead givencrypt
1293  */
1294 static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
1295                                                *greq, int desc_bytes,
1296                                                u32 *contig_ptr)
1297 {
1298         struct aead_request *req = &greq->areq;
1299         struct crypto_aead *aead = crypto_aead_reqtfm(req);
1300         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1301         struct device *jrdev = ctx->jrdev;
1302         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1303                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1304         int assoc_nents, src_nents, dst_nents = 0;
1305         struct aead_edesc *edesc;
1306         dma_addr_t iv_dma = 0;
1307         int sgc;
1308         u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
1309         int ivsize = crypto_aead_ivsize(aead);
1310         int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
1311
1312         assoc_nents = sg_count(req->assoc, req->assoclen);
1313         src_nents = sg_count(req->src, req->cryptlen);
1314
1315         if (unlikely(req->dst != req->src))
1316                 dst_nents = sg_count(req->dst, req->cryptlen);
1317
1318         sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1,
1319                          DMA_BIDIRECTIONAL);
1320         if (likely(req->src == req->dst)) {
1321                 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
1322                                  DMA_BIDIRECTIONAL);
1323         } else {
1324                 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
1325                                  DMA_TO_DEVICE);
1326                 sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
1327                                  DMA_FROM_DEVICE);
1328         }
1329
1330         /* Check if data are contiguous */
1331         iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
1332         if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
1333             iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
1334                 contig &= ~GIV_SRC_CONTIG;
1335         if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
1336                 contig &= ~GIV_DST_CONTIG;
1337                 if (unlikely(req->src != req->dst)) {
1338                         dst_nents = dst_nents ? : 1;
1339                         sec4_sg_len += 1;
1340                 }
1341         if (!(contig & GIV_SRC_CONTIG)) {
1342                 assoc_nents = assoc_nents ? : 1;
1343                 src_nents = src_nents ? : 1;
1344                 sec4_sg_len += assoc_nents + 1 + src_nents;
1345                 if (likely(req->src == req->dst))
1346                         contig &= ~GIV_DST_CONTIG;
1347         }
1348         sec4_sg_len += dst_nents;
1349
1350         sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
1351
1352         /* allocate space for base edesc and hw desc commands, link tables */
1353         edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
1354                         sec4_sg_bytes, GFP_DMA | flags);
1355         if (!edesc) {
1356                 dev_err(jrdev, "could not allocate extended descriptor\n");
1357                 return ERR_PTR(-ENOMEM);
1358         }
1359
1360         edesc->assoc_nents = assoc_nents;
1361         edesc->src_nents = src_nents;
1362         edesc->dst_nents = dst_nents;
1363         edesc->iv_dma = iv_dma;
1364         edesc->sec4_sg_bytes = sec4_sg_bytes;
1365         edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
1366                          desc_bytes;
1367         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1368                                             sec4_sg_bytes, DMA_TO_DEVICE);
1369         *contig_ptr = contig;
1370
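             /*
              * Build the sec4 link table: [assoc][IV][src] entries when the
              * source side is not contiguous, then [IV][dst] entries when a
              * distinct, non-contiguous destination is in use.
              */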
1371         sec4_sg_index = 0;
1372         if (!(contig & GIV_SRC_CONTIG)) {
1373                 sg_to_sec4_sg(req->assoc, assoc_nents,
1374                               edesc->sec4_sg +
1375                               sec4_sg_index, 0);
1376                 sec4_sg_index += assoc_nents;
1377                 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1378                                    iv_dma, ivsize, 0);
1379                 sec4_sg_index += 1;
1380                 sg_to_sec4_sg_last(req->src, src_nents,
1381                                    edesc->sec4_sg +
1382                                    sec4_sg_index, 0);
1383                 sec4_sg_index += src_nents;
1384         }
1385         if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
1386                 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1387                                    iv_dma, ivsize, 0);
1388                 sec4_sg_index += 1;
1389                 sg_to_sec4_sg_last(req->dst, dst_nents,
1390                                    edesc->sec4_sg + sec4_sg_index, 0);
1391         }
1392
1393         return edesc;
1394 }
1395
1396 static int aead_givencrypt(struct aead_givcrypt_request *areq)
1397 {
1398         struct aead_request *req = &areq->areq;
1399         struct aead_edesc *edesc;
1400         struct crypto_aead *aead = crypto_aead_reqtfm(req);
1401         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1402         struct device *jrdev = ctx->jrdev;
1403         u32 contig;
1404         u32 *desc;
1405         int ret = 0;
1406
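             /* the encrypt output also carries the ICV, so account for it */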
1407         req->cryptlen += ctx->authsize;
1408
1409         /* allocate extended descriptor */
1410         edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
1411                                      CAAM_CMD_SZ, &contig);
1412
1413         if (IS_ERR(edesc))
1414                 return PTR_ERR(edesc);
1415
1416 #ifdef DEBUG
1417         print_hex_dump(KERN_ERR, "giv src@"xstr(__LINE__)": ",
1418                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1419                        req->cryptlen, 1);
1420 #endif
1421
1422         /* Create and submit job descriptor */
1423         init_aead_giv_job(ctx->sh_desc_givenc,
1424                           ctx->sh_desc_givenc_dma, edesc, req, contig);
1425 #ifdef DEBUG
1426         print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
1427                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1428                        desc_bytes(edesc->hw_desc), 1);
1429 #endif
1430
1431         desc = edesc->hw_desc;
1432         ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
1433         if (!ret) {
1434                 ret = -EINPROGRESS;
1435         } else {
1436                 aead_unmap(jrdev, edesc, req);
1437                 kfree(edesc);
1438         }
1439
1440         return ret;
1441 }
1442
1443 /*
1444  * allocate and map the ablkcipher extended descriptor
1445  */
1446 static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1447                                                        *req, int desc_bytes,
1448                                                        bool *iv_contig_out)
1449 {
1450         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1451         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1452         struct device *jrdev = ctx->jrdev;
1453         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1454                                           CRYPTO_TFM_REQ_MAY_SLEEP)) ?
1455                        GFP_KERNEL : GFP_ATOMIC;
1456         int src_nents, dst_nents = 0, sec4_sg_bytes;
1457         struct ablkcipher_edesc *edesc;
1458         dma_addr_t iv_dma = 0;
1459         bool iv_contig = false;
1460         int sgc;
1461         int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1462         int sec4_sg_index;
1463
1464         src_nents = sg_count(req->src, req->nbytes);
1465
1466         if (unlikely(req->dst != req->src))
1467                 dst_nents = sg_count(req->dst, req->nbytes);
1468
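             /* map src bidirectionally when in-place, else src/dst separately */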
1469         if (likely(req->src == req->dst)) {
1470                 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
1471                                  DMA_BIDIRECTIONAL);
1472         } else {
1473                 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
1474                                  DMA_TO_DEVICE);
1475                 sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
1476                                  DMA_FROM_DEVICE);
1477         }
1478
1479         /*
1480          * Check if the IV can be contiguous with source and destination.
1481          * If so, include it directly; if not, build a scatter/gather table.
1482          */
1483         iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
1484         if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
1485                 iv_contig = true;
1486         else
1487                 src_nents = src_nents ? : 1;
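             /* one table entry for the IV (unless contiguous) + one per segment */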
1488         sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
1489                         sizeof(struct sec4_sg_entry);
1490
1491         /* allocate space for base edesc and hw desc commands, link tables */
1492         edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
1493                         sec4_sg_bytes, GFP_DMA | flags);
1494         if (!edesc) {
1495                 dev_err(jrdev, "could not allocate extended descriptor\n");
1496                 return ERR_PTR(-ENOMEM);
1497         }
1498
1499         edesc->src_nents = src_nents;
1500         edesc->dst_nents = dst_nents;
1501         edesc->sec4_sg_bytes = sec4_sg_bytes;
1502         edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
1503                          desc_bytes;
1504
1505         sec4_sg_index = 0;
1506         if (!iv_contig) {
1507                 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
1508                 sg_to_sec4_sg_last(req->src, src_nents,
1509                                    edesc->sec4_sg + 1, 0);
1510                 sec4_sg_index += 1 + src_nents;
1511         }
1512
1513         if (unlikely(dst_nents)) {
1514                 sg_to_sec4_sg_last(req->dst, dst_nents,
1515                         edesc->sec4_sg + sec4_sg_index, 0);
1516         }
1517
1518         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1519                                             sec4_sg_bytes, DMA_TO_DEVICE);
1520         edesc->iv_dma = iv_dma;
1521
1522 #ifdef DEBUG
1523         print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"xstr(__LINE__)": ",
1524                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
1525                        sec4_sg_bytes, 1);
1526 #endif
1527
1528         *iv_contig_out = iv_contig;
1529         return edesc;
1530 }
1531
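     /*
      * Async contract for the request-processing functions below: a return
      * of -EINPROGRESS means the job descriptor was enqueued to a CAAM job
      * ring and the matching _done() callback will complete the request;
      * any other return means the job was never queued and the extended
      * descriptor has already been unmapped and freed.
      */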
1532 static int ablkcipher_encrypt(struct ablkcipher_request *req)
1533 {
1534         struct ablkcipher_edesc *edesc;
1535         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1536         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1537         struct device *jrdev = ctx->jrdev;
1538         bool iv_contig;
1539         u32 *desc;
1540         int ret = 0;
1541
1542         /* allocate extended descriptor */
1543         edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
1544                                        CAAM_CMD_SZ, &iv_contig);
1545         if (IS_ERR(edesc))
1546                 return PTR_ERR(edesc);
1547
1548         /* Create and submit job descriptor */
1549         init_ablkcipher_job(ctx->sh_desc_enc,
1550                 ctx->sh_desc_enc_dma, edesc, req, iv_contig);
1551 #ifdef DEBUG
1552         print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
1553                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1554                        desc_bytes(edesc->hw_desc), 1);
1555 #endif
1556         desc = edesc->hw_desc;
1557         ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
1558
1559         if (!ret) {
1560                 ret = -EINPROGRESS;
1561         } else {
1562                 ablkcipher_unmap(jrdev, edesc, req);
1563                 kfree(edesc);
1564         }
1565
1566         return ret;
1567 }
1568
1569 static int ablkcipher_decrypt(struct ablkcipher_request *req)
1570 {
1571         struct ablkcipher_edesc *edesc;
1572         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1573         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1574         struct device *jrdev = ctx->jrdev;
1575         bool iv_contig;
1576         u32 *desc;
1577         int ret = 0;
1578
1579         /* allocate extended descriptor */
1580         edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
1581                                        CAAM_CMD_SZ, &iv_contig);
1582         if (IS_ERR(edesc))
1583                 return PTR_ERR(edesc);
1584
1585         /* Create and submit job descriptor */
1586         init_ablkcipher_job(ctx->sh_desc_dec,
1587                 ctx->sh_desc_dec_dma, edesc, req, iv_contig);
1588         desc = edesc->hw_desc;
1589 #ifdef DEBUG
1590         print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
1591                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1592                        desc_bytes(edesc->hw_desc), 1);
1593 #endif
1594
1595         ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
1596         if (!ret) {
1597                 ret = -EINPROGRESS;
1598         } else {
1599                 ablkcipher_unmap(jrdev, edesc, req);
1600                 kfree(edesc);
1601         }
1602
1603         return ret;
1604 }
1605
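     /*
      * caam_alg_template: static, per-algorithm description used to build
      * a crypto_alg (and the matching CAAM operation header values) at
      * registration time.
      */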
1606 #define template_aead           template_u.aead
1607 #define template_ablkcipher     template_u.ablkcipher
1608 struct caam_alg_template {
1609         char name[CRYPTO_MAX_ALG_NAME];
1610         char driver_name[CRYPTO_MAX_ALG_NAME];
1611         unsigned int blocksize;
1612         u32 type;
1613         union {
1614                 struct ablkcipher_alg ablkcipher;
1615                 struct aead_alg aead;
1616                 struct blkcipher_alg blkcipher;
1617                 struct cipher_alg cipher;
1618                 struct compress_alg compress;
1619                 struct rng_alg rng;
1620         } template_u;
1621         u32 class1_alg_type;
1622         u32 class2_alg_type;
1623         u32 alg_op;
1624 };
1625
1626 static struct caam_alg_template driver_algs[] = {
1627         /* single-pass ipsec_esp descriptor */
1628         {
1629                 .name = "authenc(hmac(md5),cbc(aes))",
1630                 .driver_name = "authenc-hmac-md5-cbc-aes-caam",
1631                 .blocksize = AES_BLOCK_SIZE,
1632                 .type = CRYPTO_ALG_TYPE_AEAD,
1633                 .template_aead = {
1634                         .setkey = aead_setkey,
1635                         .setauthsize = aead_setauthsize,
1636                         .encrypt = aead_encrypt,
1637                         .decrypt = aead_decrypt,
1638                         .givencrypt = aead_givencrypt,
1639                         .geniv = "<built-in>",
1640                         .ivsize = AES_BLOCK_SIZE,
1641                         .maxauthsize = MD5_DIGEST_SIZE,
1642                         },
1643                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1644                 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
1645                 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1646         },
1647         {
1648                 .name = "authenc(hmac(sha1),cbc(aes))",
1649                 .driver_name = "authenc-hmac-sha1-cbc-aes-caam",
1650                 .blocksize = AES_BLOCK_SIZE,
1651                 .type = CRYPTO_ALG_TYPE_AEAD,
1652                 .template_aead = {
1653                         .setkey = aead_setkey,
1654                         .setauthsize = aead_setauthsize,
1655                         .encrypt = aead_encrypt,
1656                         .decrypt = aead_decrypt,
1657                         .givencrypt = aead_givencrypt,
1658                         .geniv = "<built-in>",
1659                         .ivsize = AES_BLOCK_SIZE,
1660                         .maxauthsize = SHA1_DIGEST_SIZE,
1661                         },
1662                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1663                 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1664                 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1665         },
1666         {
1667                 .name = "authenc(hmac(sha224),cbc(aes))",
1668                 .driver_name = "authenc-hmac-sha224-cbc-aes-caam",
1669                 .blocksize = AES_BLOCK_SIZE,
                     .type = CRYPTO_ALG_TYPE_AEAD,
1670                 .template_aead = {
1671                         .setkey = aead_setkey,
1672                         .setauthsize = aead_setauthsize,
1673                         .encrypt = aead_encrypt,
1674                         .decrypt = aead_decrypt,
1675                         .givencrypt = aead_givencrypt,
1676                         .geniv = "<built-in>",
1677                         .ivsize = AES_BLOCK_SIZE,
1678                         .maxauthsize = SHA224_DIGEST_SIZE,
1679                         },
1680                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1681                 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1682                                    OP_ALG_AAI_HMAC_PRECOMP,
1683                 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1684         },
1685         {
1686                 .name = "authenc(hmac(sha256),cbc(aes))",
1687                 .driver_name = "authenc-hmac-sha256-cbc-aes-caam",
1688                 .blocksize = AES_BLOCK_SIZE,
1689                 .type = CRYPTO_ALG_TYPE_AEAD,
1690                 .template_aead = {
1691                         .setkey = aead_setkey,
1692                         .setauthsize = aead_setauthsize,
1693                         .encrypt = aead_encrypt,
1694                         .decrypt = aead_decrypt,
1695                         .givencrypt = aead_givencrypt,
1696                         .geniv = "<built-in>",
1697                         .ivsize = AES_BLOCK_SIZE,
1698                         .maxauthsize = SHA256_DIGEST_SIZE,
1699                         },
1700                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1701                 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1702                                    OP_ALG_AAI_HMAC_PRECOMP,
1703                 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1704         },
1705         {
1706                 .name = "authenc(hmac(sha384),cbc(aes))",
1707                 .driver_name = "authenc-hmac-sha384-cbc-aes-caam",
1708                 .blocksize = AES_BLOCK_SIZE,
                     .type = CRYPTO_ALG_TYPE_AEAD,
1709                 .template_aead = {
1710                         .setkey = aead_setkey,
1711                         .setauthsize = aead_setauthsize,
1712                         .encrypt = aead_encrypt,
1713                         .decrypt = aead_decrypt,
1714                         .givencrypt = aead_givencrypt,
1715                         .geniv = "<built-in>",
1716                         .ivsize = AES_BLOCK_SIZE,
1717                         .maxauthsize = SHA384_DIGEST_SIZE,
1718                         },
1719                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1720                 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1721                                    OP_ALG_AAI_HMAC_PRECOMP,
1722                 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1723         },
1725         {
1726                 .name = "authenc(hmac(sha512),cbc(aes))",
1727                 .driver_name = "authenc-hmac-sha512-cbc-aes-caam",
1728                 .blocksize = AES_BLOCK_SIZE,
1729                 .type = CRYPTO_ALG_TYPE_AEAD,
1730                 .template_aead = {
1731                         .setkey = aead_setkey,
1732                         .setauthsize = aead_setauthsize,
1733                         .encrypt = aead_encrypt,
1734                         .decrypt = aead_decrypt,
1735                         .givencrypt = aead_givencrypt,
1736                         .geniv = "<built-in>",
1737                         .ivsize = AES_BLOCK_SIZE,
1738                         .maxauthsize = SHA512_DIGEST_SIZE,
1739                         },
1740                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1741                 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1742                                    OP_ALG_AAI_HMAC_PRECOMP,
1743                 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1744         },
1745         {
1746                 .name = "authenc(hmac(md5),cbc(des3_ede))",
1747                 .driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
1748                 .blocksize = DES3_EDE_BLOCK_SIZE,
1749                 .type = CRYPTO_ALG_TYPE_AEAD,
1750                 .template_aead = {
1751                         .setkey = aead_setkey,
1752                         .setauthsize = aead_setauthsize,
1753                         .encrypt = aead_encrypt,
1754                         .decrypt = aead_decrypt,
1755                         .givencrypt = aead_givencrypt,
1756                         .geniv = "<built-in>",
1757                         .ivsize = DES3_EDE_BLOCK_SIZE,
1758                         .maxauthsize = MD5_DIGEST_SIZE,
1759                         },
1760                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1761                 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
1762                 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1763         },
1764         {
1765                 .name = "authenc(hmac(sha1),cbc(des3_ede))",
1766                 .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
1767                 .blocksize = DES3_EDE_BLOCK_SIZE,
1768                 .type = CRYPTO_ALG_TYPE_AEAD,
1769                 .template_aead = {
1770                         .setkey = aead_setkey,
1771                         .setauthsize = aead_setauthsize,
1772                         .encrypt = aead_encrypt,
1773                         .decrypt = aead_decrypt,
1774                         .givencrypt = aead_givencrypt,
1775                         .geniv = "<built-in>",
1776                         .ivsize = DES3_EDE_BLOCK_SIZE,
1777                         .maxauthsize = SHA1_DIGEST_SIZE,
1778                         },
1779                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1780                 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1781                 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1782         },
1783         {
1784                 .name = "authenc(hmac(sha224),cbc(des3_ede))",
1785                 .driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
1786                 .blocksize = DES3_EDE_BLOCK_SIZE,
                     .type = CRYPTO_ALG_TYPE_AEAD,
1787                 .template_aead = {
1788                         .setkey = aead_setkey,
1789                         .setauthsize = aead_setauthsize,
1790                         .encrypt = aead_encrypt,
1791                         .decrypt = aead_decrypt,
1792                         .givencrypt = aead_givencrypt,
1793                         .geniv = "<built-in>",
1794                         .ivsize = DES3_EDE_BLOCK_SIZE,
1795                         .maxauthsize = SHA224_DIGEST_SIZE,
1796                         },
1797                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1798                 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1799                                    OP_ALG_AAI_HMAC_PRECOMP,
1800                 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1801         },
1802         {
1803                 .name = "authenc(hmac(sha256),cbc(des3_ede))",
1804                 .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
1805                 .blocksize = DES3_EDE_BLOCK_SIZE,
1806                 .type = CRYPTO_ALG_TYPE_AEAD,
1807                 .template_aead = {
1808                         .setkey = aead_setkey,
1809                         .setauthsize = aead_setauthsize,
1810                         .encrypt = aead_encrypt,
1811                         .decrypt = aead_decrypt,
1812                         .givencrypt = aead_givencrypt,
1813                         .geniv = "<built-in>",
1814                         .ivsize = DES3_EDE_BLOCK_SIZE,
1815                         .maxauthsize = SHA256_DIGEST_SIZE,
1816                         },
1817                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1818                 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1819                                    OP_ALG_AAI_HMAC_PRECOMP,
1820                 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1821         },
1822         {
1823                 .name = "authenc(hmac(sha384),cbc(des3_ede))",
1824                 .driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
1825                 .blocksize = DES3_EDE_BLOCK_SIZE,
                     .type = CRYPTO_ALG_TYPE_AEAD,
1826                 .template_aead = {
1827                         .setkey = aead_setkey,
1828                         .setauthsize = aead_setauthsize,
1829                         .encrypt = aead_encrypt,
1830                         .decrypt = aead_decrypt,
1831                         .givencrypt = aead_givencrypt,
1832                         .geniv = "<built-in>",
1833                         .ivsize = DES3_EDE_BLOCK_SIZE,
1834                         .maxauthsize = SHA384_DIGEST_SIZE,
1835                         },
1836                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1837                 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1838                                    OP_ALG_AAI_HMAC_PRECOMP,
1839                 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1840         },
1841         {
1842                 .name = "authenc(hmac(sha512),cbc(des3_ede))",
1843                 .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
1844                 .blocksize = DES3_EDE_BLOCK_SIZE,
1845                 .type = CRYPTO_ALG_TYPE_AEAD,
1846                 .template_aead = {
1847                         .setkey = aead_setkey,
1848                         .setauthsize = aead_setauthsize,
1849                         .encrypt = aead_encrypt,
1850                         .decrypt = aead_decrypt,
1851                         .givencrypt = aead_givencrypt,
1852                         .geniv = "<built-in>",
1853                         .ivsize = DES3_EDE_BLOCK_SIZE,
1854                         .maxauthsize = SHA512_DIGEST_SIZE,
1855                         },
1856                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1857                 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1858                                    OP_ALG_AAI_HMAC_PRECOMP,
1859                 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1860         },
1861         {
1862                 .name = "authenc(hmac(md5),cbc(des))",
1863                 .driver_name = "authenc-hmac-md5-cbc-des-caam",
1864                 .blocksize = DES_BLOCK_SIZE,
1865                 .type = CRYPTO_ALG_TYPE_AEAD,
1866                 .template_aead = {
1867                         .setkey = aead_setkey,
1868                         .setauthsize = aead_setauthsize,
1869                         .encrypt = aead_encrypt,
1870                         .decrypt = aead_decrypt,
1871                         .givencrypt = aead_givencrypt,
1872                         .geniv = "<built-in>",
1873                         .ivsize = DES_BLOCK_SIZE,
1874                         .maxauthsize = MD5_DIGEST_SIZE,
1875                         },
1876                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1877                 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
1878                 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1879         },
1880         {
1881                 .name = "authenc(hmac(sha1),cbc(des))",
1882                 .driver_name = "authenc-hmac-sha1-cbc-des-caam",
1883                 .blocksize = DES_BLOCK_SIZE,
1884                 .type = CRYPTO_ALG_TYPE_AEAD,
1885                 .template_aead = {
1886                         .setkey = aead_setkey,
1887                         .setauthsize = aead_setauthsize,
1888                         .encrypt = aead_encrypt,
1889                         .decrypt = aead_decrypt,
1890                         .givencrypt = aead_givencrypt,
1891                         .geniv = "<built-in>",
1892                         .ivsize = DES_BLOCK_SIZE,
1893                         .maxauthsize = SHA1_DIGEST_SIZE,
1894                         },
1895                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1896                 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1897                 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1898         },
1899         {
1900                 .name = "authenc(hmac(sha224),cbc(des))",
1901                 .driver_name = "authenc-hmac-sha224-cbc-des-caam",
1902                 .blocksize = DES_BLOCK_SIZE,
                     .type = CRYPTO_ALG_TYPE_AEAD,
1903                 .template_aead = {
1904                         .setkey = aead_setkey,
1905                         .setauthsize = aead_setauthsize,
1906                         .encrypt = aead_encrypt,
1907                         .decrypt = aead_decrypt,
1908                         .givencrypt = aead_givencrypt,
1909                         .geniv = "<built-in>",
1910                         .ivsize = DES_BLOCK_SIZE,
1911                         .maxauthsize = SHA224_DIGEST_SIZE,
1912                         },
1913                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1914                 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1915                                    OP_ALG_AAI_HMAC_PRECOMP,
1916                 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1917         },
1918         {
1919                 .name = "authenc(hmac(sha256),cbc(des))",
1920                 .driver_name = "authenc-hmac-sha256-cbc-des-caam",
1921                 .blocksize = DES_BLOCK_SIZE,
1922                 .type = CRYPTO_ALG_TYPE_AEAD,
1923                 .template_aead = {
1924                         .setkey = aead_setkey,
1925                         .setauthsize = aead_setauthsize,
1926                         .encrypt = aead_encrypt,
1927                         .decrypt = aead_decrypt,
1928                         .givencrypt = aead_givencrypt,
1929                         .geniv = "<built-in>",
1930                         .ivsize = DES_BLOCK_SIZE,
1931                         .maxauthsize = SHA256_DIGEST_SIZE,
1932                         },
1933                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1934                 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1935                                    OP_ALG_AAI_HMAC_PRECOMP,
1936                 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1937         },
1938         {
1939                 .name = "authenc(hmac(sha384),cbc(des))",
1940                 .driver_name = "authenc-hmac-sha384-cbc-des-caam",
1941                 .blocksize = DES_BLOCK_SIZE,
                     .type = CRYPTO_ALG_TYPE_AEAD,
1942                 .template_aead = {
1943                         .setkey = aead_setkey,
1944                         .setauthsize = aead_setauthsize,
1945                         .encrypt = aead_encrypt,
1946                         .decrypt = aead_decrypt,
1947                         .givencrypt = aead_givencrypt,
1948                         .geniv = "<built-in>",
1949                         .ivsize = DES_BLOCK_SIZE,
1950                         .maxauthsize = SHA384_DIGEST_SIZE,
1951                         },
1952                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1953                 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1954                                    OP_ALG_AAI_HMAC_PRECOMP,
1955                 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1956         },
1957         {
1958                 .name = "authenc(hmac(sha512),cbc(des))",
1959                 .driver_name = "authenc-hmac-sha512-cbc-des-caam",
1960                 .blocksize = DES_BLOCK_SIZE,
1961                 .type = CRYPTO_ALG_TYPE_AEAD,
1962                 .template_aead = {
1963                         .setkey = aead_setkey,
1964                         .setauthsize = aead_setauthsize,
1965                         .encrypt = aead_encrypt,
1966                         .decrypt = aead_decrypt,
1967                         .givencrypt = aead_givencrypt,
1968                         .geniv = "<built-in>",
1969                         .ivsize = DES_BLOCK_SIZE,
1970                         .maxauthsize = SHA512_DIGEST_SIZE,
1971                         },
1972                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1973                 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1974                                    OP_ALG_AAI_HMAC_PRECOMP,
1975                 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1976         },
1977         /* ablkcipher descriptor */
1978         {
1979                 .name = "cbc(aes)",
1980                 .driver_name = "cbc-aes-caam",
1981                 .blocksize = AES_BLOCK_SIZE,
1982                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
1983                 .template_ablkcipher = {
1984                         .setkey = ablkcipher_setkey,
1985                         .encrypt = ablkcipher_encrypt,
1986                         .decrypt = ablkcipher_decrypt,
1987                         .geniv = "eseqiv",
1988                         .min_keysize = AES_MIN_KEY_SIZE,
1989                         .max_keysize = AES_MAX_KEY_SIZE,
1990                         .ivsize = AES_BLOCK_SIZE,
1991                         },
1992                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1993         },
1994         {
1995                 .name = "cbc(des3_ede)",
1996                 .driver_name = "cbc-3des-caam",
1997                 .blocksize = DES3_EDE_BLOCK_SIZE,
1998                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
1999                 .template_ablkcipher = {
2000                         .setkey = ablkcipher_setkey,
2001                         .encrypt = ablkcipher_encrypt,
2002                         .decrypt = ablkcipher_decrypt,
2003                         .geniv = "eseqiv",
2004                         .min_keysize = DES3_EDE_KEY_SIZE,
2005                         .max_keysize = DES3_EDE_KEY_SIZE,
2006                         .ivsize = DES3_EDE_BLOCK_SIZE,
2007                         },
2008                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2009         },
2010         {
2011                 .name = "cbc(des)",
2012                 .driver_name = "cbc-des-caam",
2013                 .blocksize = DES_BLOCK_SIZE,
2014                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2015                 .template_ablkcipher = {
2016                         .setkey = ablkcipher_setkey,
2017                         .encrypt = ablkcipher_encrypt,
2018                         .decrypt = ablkcipher_decrypt,
2019                         .geniv = "eseqiv",
2020                         .min_keysize = DES_KEY_SIZE,
2021                         .max_keysize = DES_KEY_SIZE,
2022                         .ivsize = DES_BLOCK_SIZE,
2023                         },
2024                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2025         }
2026 };
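
     /*
      * Illustrative example (not part of this driver): once registered,
      * these algorithms are reached through the regular kernel crypto API
      * and are preferred over software implementations by cra_priority:
      *
      *      struct crypto_aead *tfm;
      *
      *      tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
      *      if (IS_ERR(tfm))
      *              return PTR_ERR(tfm);
      *      crypto_aead_setkey(tfm, key, keylen);
      *      crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
      */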
2027
2028 struct caam_crypto_alg {
2029         struct list_head entry;
2030         struct device *ctrldev;
2031         int class1_alg_type;
2032         int class2_alg_type;
2033         int alg_op;
2034         struct crypto_alg crypto_alg;
2035 };
2036
2037 static int caam_cra_init(struct crypto_tfm *tfm)
2038 {
2039         struct crypto_alg *alg = tfm->__crt_alg;
2040         struct caam_crypto_alg *caam_alg =
2041                  container_of(alg, struct caam_crypto_alg, crypto_alg);
2042         struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2043         struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev);
2044         int tgt_jr = atomic_inc_return(&priv->tfm_count);
2045
2046         /*
2047          * distribute tfms across job rings to ensure in-order
2048          * crypto request processing per tfm
2049          */
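             /* (e.g. with two job rings: tfms 0,1 -> ring 0; 2,3 -> ring 1) */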
2050         ctx->jrdev = priv->jrdev[(tgt_jr / 2) % priv->total_jobrs];
2051
2052         /* copy descriptor header template value */
2053         ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
2054         ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
2055         ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;
2056
2057         return 0;
2058 }
2059
2060 static void caam_cra_exit(struct crypto_tfm *tfm)
2061 {
2062         struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2063
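             /* unmap the shared descriptors set up when this tfm was keyed */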
2064         if (ctx->sh_desc_enc_dma &&
2065             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
2066                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
2067                                  desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
2068         if (ctx->sh_desc_dec_dma &&
2069             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
2070                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
2071                                  desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
2072         if (ctx->sh_desc_givenc_dma &&
2073             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
2074                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
2075                                  desc_bytes(ctx->sh_desc_givenc),
2076                                  DMA_TO_DEVICE);
2077 }
2078
2079 static void __exit caam_algapi_exit(void)
2080 {
2082         struct device_node *dev_node;
2083         struct platform_device *pdev;
2084         struct device *ctrldev;
2085         struct caam_drv_private *priv;
2086         struct caam_crypto_alg *t_alg, *n;
2087
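             /*
              * the controller node may carry either the current compatible
              * string or an older device-tree spelling
              */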
2088         dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
2089         if (!dev_node) {
2090                 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
2091                 if (!dev_node)
2092                         return;
2093         }
2094
2095         pdev = of_find_device_by_node(dev_node);
2096         if (!pdev)
2097                 return;
2098
2099         ctrldev = &pdev->dev;
2100         of_node_put(dev_node);
2101         priv = dev_get_drvdata(ctrldev);
2102
2103         if (!priv->alg_list.next)
2104                 return;
2105
2106         list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
2107                 crypto_unregister_alg(&t_alg->crypto_alg);
2108                 list_del(&t_alg->entry);
2109                 kfree(t_alg);
2110         }
2111 }
2112
2113 static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
2114                                               struct caam_alg_template
2115                                               *template)
2116 {
2117         struct caam_crypto_alg *t_alg;
2118         struct crypto_alg *alg;
2119
2120         t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
2121         if (!t_alg) {
2122                 dev_err(ctrldev, "failed to allocate t_alg\n");
2123                 return ERR_PTR(-ENOMEM);
2124         }
2125
2126         alg = &t_alg->crypto_alg;
2127
2128         snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
2129         snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2130                  template->driver_name);
2131         alg->cra_module = THIS_MODULE;
2132         alg->cra_init = caam_cra_init;
2133         alg->cra_exit = caam_cra_exit;
2134         alg->cra_priority = CAAM_CRA_PRIORITY;
2135         alg->cra_blocksize = template->blocksize;
2136         alg->cra_alignmask = 0;
2137         alg->cra_ctxsize = sizeof(struct caam_ctx);
2138         alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
2139                          template->type;
2140         switch (template->type) {
2141         case CRYPTO_ALG_TYPE_ABLKCIPHER:
2142                 alg->cra_type = &crypto_ablkcipher_type;
2143                 alg->cra_ablkcipher = template->template_ablkcipher;
2144                 break;
2145         case CRYPTO_ALG_TYPE_AEAD:
2146                 alg->cra_type = &crypto_aead_type;
2147                 alg->cra_aead = template->template_aead;
2148                 break;
2149         }
2150
2151         t_alg->class1_alg_type = template->class1_alg_type;
2152         t_alg->class2_alg_type = template->class2_alg_type;
2153         t_alg->alg_op = template->alg_op;
2154         t_alg->ctrldev = ctrldev;
2155
2156         return t_alg;
2157 }
2158
2159 static int __init caam_algapi_init(void)
2160 {
2161         struct device_node *dev_node;
2162         struct platform_device *pdev;
2163         struct device *ctrldev;
2164         struct caam_drv_private *priv;
2165         int i = 0, err = 0;
2166
2167         dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
2168         if (!dev_node) {
2169                 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
2170                 if (!dev_node)
2171                         return -ENODEV;
2172         }
2173
2174         pdev = of_find_device_by_node(dev_node);
2175         if (!pdev)
2176                 return -ENODEV;
2177
2178         ctrldev = &pdev->dev;
2179         priv = dev_get_drvdata(ctrldev);
2181
2182         INIT_LIST_HEAD(&priv->alg_list);
2183
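             /* start at -1 so the first atomic_inc_return() yields index 0 */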
2184         atomic_set(&priv->tfm_count, -1);
2185
2186         /* register crypto algorithms the device supports */
2187         for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2188                 /* TODO: check if h/w supports alg */
2189                 struct caam_crypto_alg *t_alg;
2190
2191                 t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
2192                 if (IS_ERR(t_alg)) {
2193                         err = PTR_ERR(t_alg);
2194                         dev_warn(ctrldev, "%s alg allocation failed\n",
2195                                  driver_algs[i].driver_name);
2196                         continue;
2197                 }
2198
2199                 err = crypto_register_alg(&t_alg->crypto_alg);
2200                 if (err) {
2201                         dev_warn(ctrldev, "%s alg registration failed\n",
2202                                 t_alg->crypto_alg.cra_driver_name);
2203                         kfree(t_alg);
2204                 } else
2205                         list_add_tail(&t_alg->entry, &priv->alg_list);
2206         }
2207         if (!list_empty(&priv->alg_list))
2208                 dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n",
2209                          (char *)of_get_property(dev_node, "compatible", NULL));
             of_node_put(dev_node);
2210
2211         return err;
2212 }
2213
2214 module_init(caam_algapi_init);
2215 module_exit(caam_algapi_exit);
2216
2217 MODULE_LICENSE("GPL");
2218 MODULE_DESCRIPTION("FSL CAAM support for crypto API");
2219 MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");