1 /*
2  * caam - Freescale FSL CAAM support for crypto API
3  *
4  * Copyright 2008-2011 Freescale Semiconductor, Inc.
5  *
6  * Based on talitos crypto API driver.
7  *
8  * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
9  *
10  * ---------------                     ---------------
11  * | JobDesc #1  |-------------------->|  ShareDesc  |
12  * | *(packet 1) |                     |   (PDB)     |
13  * ---------------      |------------->|  (hashKey)  |
14  *       .              |              | (cipherKey) |
15  *       .              |    |-------->| (operation) |
16  * ---------------      |    |         ---------------
17  * | JobDesc #2  |------|    |
18  * | *(packet 2) |           |
19  * ---------------           |
20  *       .                   |
21  *       .                   |
22  * ---------------           |
23  * | JobDesc #3  |------------
24  * | *(packet 3) |
25  * ---------------
26  *
27  * The SharedDesc never changes for a connection unless rekeyed, but
28  * each packet will likely be in a different place. So all we need
29  * to know to process the packet is where the input is, where the
30  * output goes, and what context we want to process with. Context is
31  * in the SharedDesc, packet references in the JobDesc.
32  *
33  * So, a job desc looks like:
34  *
35  * ---------------------
36  * | Header            |
37  * | ShareDesc Pointer |
38  * | SEQ_OUT_PTR       |
39  * | (output buffer)   |
40  * | (output length)   |
41  * | SEQ_IN_PTR        |
42  * | (input buffer)    |
43  * | (input length)    |
44  * ---------------------
45  */
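/*
 * Editorial sketch (not part of the driver): using the desc_constr.h
 * helpers seen later in this file, a job descriptor of the shape above
 * could be built roughly as follows, assuming sh_desc_dma, src_dma,
 * dst_dma and the lengths are already known and DMA-mapped:
 *
 *	u32 desc[MAX_CAAM_DESCSIZE];
 *
 *	init_job_desc_shared(desc, sh_desc_dma, sh_desc_len,
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_out_ptr(desc, dst_dma, dst_len, 0);
 *	append_seq_in_ptr(desc, src_dma, src_len, 0);
 *
 * init_aead_job() and init_ablkcipher_job() below follow this pattern.
 */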
46
47 #include "compat.h"
48
49 #include "regs.h"
50 #include "intern.h"
51 #include "desc_constr.h"
52 #include "jr.h"
53 #include "error.h"
54
55 /*
56  * crypto alg
57  */
58 #define CAAM_CRA_PRIORITY               3000
59 /* max key is sum of AES_MAX_KEY_SIZE, max split key size */
60 #define CAAM_MAX_KEY_SIZE               (AES_MAX_KEY_SIZE + \
61                                          SHA512_DIGEST_SIZE * 2)
62 /* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
63 #define CAAM_MAX_IV_LENGTH              16
64
65 /* length of descriptors text */
66 #define DESC_JOB_IO_LEN                 (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
67
68 #define DESC_AEAD_BASE                  (4 * CAAM_CMD_SZ)
69 #define DESC_AEAD_ENC_LEN               (DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
70 #define DESC_AEAD_DEC_LEN               (DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
71 #define DESC_AEAD_GIVENC_LEN            (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
72
73 #define DESC_ABLKCIPHER_BASE            (3 * CAAM_CMD_SZ)
74 #define DESC_ABLKCIPHER_ENC_LEN         (DESC_ABLKCIPHER_BASE + \
75                                          20 * CAAM_CMD_SZ)
76 #define DESC_ABLKCIPHER_DEC_LEN         (DESC_ABLKCIPHER_BASE + \
77                                          15 * CAAM_CMD_SZ)
78
79 #define DESC_MAX_USED_BYTES             (DESC_AEAD_GIVENC_LEN + \
80                                          CAAM_MAX_KEY_SIZE)
81 #define DESC_MAX_USED_LEN               (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
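/*
 * Editorial note: assuming CAAM_CMD_SZ is one 32-bit command word (4
 * bytes), DESC_AEAD_GIVENC_LEN works out to (4 + 16 + 7) * 4 = 108
 * bytes, so DESC_MAX_USED_BYTES = 108 + 160 = 268 bytes and
 * DESC_MAX_USED_LEN = 67 words for each shared descriptor buffer.
 */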
82
83 #ifdef DEBUG
84 /* for print_hex_dumps with line references */
85 #define xstr(s) str(s)
86 #define str(s) #s
87 #define debug(format, arg...) printk(format, arg)
88 #else
89 #define debug(format, arg...)
90 #endif
91
92 /* Set DK bit in class 1 operation if shared */
93 static inline void append_dec_op1(u32 *desc, u32 type)
94 {
95         u32 *jump_cmd, *uncond_jump_cmd;
96
97         jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
98         append_operation(desc, type | OP_ALG_AS_INITFINAL |
99                          OP_ALG_DECRYPT);
100         uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
101         set_jump_tgt_here(desc, jump_cmd);
102         append_operation(desc, type | OP_ALG_AS_INITFINAL |
103                          OP_ALG_DECRYPT | OP_ALG_AAI_DK);
104         set_jump_tgt_here(desc, uncond_jump_cmd);
105 }
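/*
 * Editorial note: linearized, append_dec_op1() emits
 *
 *	JUMP if-shared  -> dk
 *	OPERATION decrypt
 *	JUMP always     -> done
 * dk:	OPERATION decrypt | OP_ALG_AAI_DK
 * done:
 *
 * i.e. the DK (decrypt key) bit is set only on the path taken when the
 * descriptor is already shared, per the comment above.
 */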
106
107 /*
108  * Wait for completion of class 1 key loading before allowing
109  * error propagation
110  */
111 static inline void append_dec_shr_done(u32 *desc)
112 {
113         u32 *jump_cmd;
114
115         jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
116         set_jump_tgt_here(desc, jump_cmd);
117         append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
118 }
119
120 /*
121  * For aead functions, read payload and write payload,
122  * both of which are specified in req->src and req->dst
123  */
124 static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
125 {
126         append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
127                              KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
128         append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
129 }
130
131 /*
132  * For aead encrypt and decrypt, read iv for both classes
133  */
134 static inline void aead_append_ld_iv(u32 *desc, int ivsize)
135 {
136         append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
137                    LDST_CLASS_1_CCB | ivsize);
138         append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
139 }
140
141 /*
142  * For ablkcipher encrypt and decrypt, read from req->src and
143  * write to req->dst
144  */
145 static inline void ablkcipher_append_src_dst(u32 *desc)
146 {
147         append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
148         append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
149         append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
150                              KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
151         append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
152 }
153
154 /*
155  * If all data, including src (with assoc and iv) or dst (with iv only) are
156  * contiguous
157  */
158 #define GIV_SRC_CONTIG          1
159 #define GIV_DST_CONTIG          (1 << 1)
160
161 /*
162  * per-session context
163  */
164 struct caam_ctx {
165         struct device *jrdev;
166         u32 sh_desc_enc[DESC_MAX_USED_LEN];
167         u32 sh_desc_dec[DESC_MAX_USED_LEN];
168         u32 sh_desc_givenc[DESC_MAX_USED_LEN];
169         dma_addr_t sh_desc_enc_dma;
170         dma_addr_t sh_desc_dec_dma;
171         dma_addr_t sh_desc_givenc_dma;
172         u32 class1_alg_type;
173         u32 class2_alg_type;
174         u32 alg_op;
175         u8 key[CAAM_MAX_KEY_SIZE];
176         dma_addr_t key_dma;
177         unsigned int enckeylen;
178         unsigned int split_key_len;
179         unsigned int split_key_pad_len;
180         unsigned int authsize;
181 };
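/*
 * Editorial sketch of the ctx->key layout assumed by append_key_aead()
 * below and built by aead_setkey():
 *
 *	ctx->key: [ auth split key | pad            | encryption key ]
 *	           <- split_key_len ->
 *	           <--- split_key_pad_len ---------> <-- enckeylen -->
 *
 * key_dma maps split_key_pad_len + enckeylen bytes of this buffer.
 */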
182
183 static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
184                             int keys_fit_inline)
185 {
186         if (keys_fit_inline) {
187                 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
188                                   ctx->split_key_len, CLASS_2 |
189                                   KEY_DEST_MDHA_SPLIT | KEY_ENC);
190                 append_key_as_imm(desc, (void *)ctx->key +
191                                   ctx->split_key_pad_len, ctx->enckeylen,
192                                   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
193         } else {
194                 append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
195                            KEY_DEST_MDHA_SPLIT | KEY_ENC);
196                 append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
197                            ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
198         }
199 }
200
201 static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
202                                   int keys_fit_inline)
203 {
204         u32 *key_jump_cmd;
205
206         init_sh_desc(desc, HDR_SHARE_WAIT);
207
208         /* Skip if already shared */
209         key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
210                                    JUMP_COND_SHRD);
211
212         append_key_aead(desc, ctx, keys_fit_inline);
213
214         set_jump_tgt_here(desc, key_jump_cmd);
215
216         /* Propagate errors from shared to job descriptor */
217         append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
218 }
219
220 static int aead_set_sh_desc(struct crypto_aead *aead)
221 {
222         struct aead_tfm *tfm = &aead->base.crt_aead;
223         struct caam_ctx *ctx = crypto_aead_ctx(aead);
224         struct device *jrdev = ctx->jrdev;
225         bool keys_fit_inline = false;
226         u32 *key_jump_cmd, *jump_cmd;
227         u32 geniv, moveiv;
228         u32 *desc;
229
230         if (!ctx->enckeylen || !ctx->authsize)
231                 return 0;
232
233         /*
234          * Job Descriptor and Shared Descriptors
235          * must all fit into the 64-word Descriptor h/w Buffer
236          */
237         if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
238             ctx->split_key_pad_len + ctx->enckeylen <=
239             CAAM_DESC_BYTES_MAX)
240                 keys_fit_inline = true;
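            /*
             * Editorial example: for SHA1 + AES-128 (split_key_pad_len
             * 48, enckeylen 16), assuming 4-byte commands and 32-bit
             * pointers, this is 80 + 32 + 48 + 16 = 176 bytes, well
             * within the 64-word (256-byte) buffer, so keys are inlined.
             */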
241
242         /* aead_encrypt shared descriptor */
243         desc = ctx->sh_desc_enc;
244
245         init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
246
247         /* Class 2 operation */
248         append_operation(desc, ctx->class2_alg_type |
249                          OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
250
251         /* cryptlen = seqoutlen - authsize */
252         append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
253
254         /* assoclen + cryptlen = seqinlen - ivsize */
255         append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
256
257         /* assoclen + cryptlen = (assoclen + cryptlen) - cryptlen */
258         append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
259
260         /* read assoc before reading payload */
261         append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
262                              KEY_VLF);
263         aead_append_ld_iv(desc, tfm->ivsize);
264
265         /* Class 1 operation */
266         append_operation(desc, ctx->class1_alg_type |
267                          OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
268
269         /* Read and write cryptlen bytes */
270         append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
271         append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
272         aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
273
274         /* Write ICV */
275         append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
276                          LDST_SRCDST_BYTE_CONTEXT);
277
278         ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
279                                               desc_bytes(desc),
280                                               DMA_TO_DEVICE);
281         if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
282                 dev_err(jrdev, "unable to map shared descriptor\n");
283                 return -ENOMEM;
284         }
285 #ifdef DEBUG
286         print_hex_dump(KERN_ERR, "aead enc shdesc@"xstr(__LINE__)": ",
287                        DUMP_PREFIX_ADDRESS, 16, 4, desc,
288                        desc_bytes(desc), 1);
289 #endif
290
291         /*
292          * Job Descriptor and Shared Descriptors
293          * must all fit into the 64-word Descriptor h/w Buffer
294          */
            keys_fit_inline = false;
295         if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
296             ctx->split_key_pad_len + ctx->enckeylen <=
297             CAAM_DESC_BYTES_MAX)
298                 keys_fit_inline = true;
299
300         desc = ctx->sh_desc_dec;
301
302         /* aead_decrypt shared descriptor */
303         init_sh_desc(desc, HDR_SHARE_WAIT);
304
305         /* Skip if already shared */
306         key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
307                                    JUMP_COND_SHRD);
308
309         append_key_aead(desc, ctx, keys_fit_inline);
310
311         /* Only propagate error immediately if shared */
312         jump_cmd = append_jump(desc, JUMP_TEST_ALL);
313         set_jump_tgt_here(desc, key_jump_cmd);
314         append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
315         set_jump_tgt_here(desc, jump_cmd);
316
317         /* Class 2 operation */
318         append_operation(desc, ctx->class2_alg_type |
319                          OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
320
321         /* assoclen + cryptlen = seqinlen - ivsize */
322         append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
323                                 ctx->authsize + tfm->ivsize);
324         /* assoclen = (assoclen + cryptlen) - cryptlen */
325         append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
326         append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
327
328         /* read assoc before reading payload */
329         append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
330                              KEY_VLF);
331
332         aead_append_ld_iv(desc, tfm->ivsize);
333
334         append_dec_op1(desc, ctx->class1_alg_type);
335
336         /* Read and write cryptlen bytes */
337         append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
338         append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
339         aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
340
341         /* Load ICV */
342         append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
343                              FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
344         append_dec_shr_done(desc);
345
346         ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
347                                               desc_bytes(desc),
348                                               DMA_TO_DEVICE);
349         if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
350                 dev_err(jrdev, "unable to map shared descriptor\n");
351                 return -ENOMEM;
352         }
353 #ifdef DEBUG
354         print_hex_dump(KERN_ERR, "aead dec shdesc@"xstr(__LINE__)": ",
355                        DUMP_PREFIX_ADDRESS, 16, 4, desc,
356                        desc_bytes(desc), 1);
357 #endif
358
359         /*
360          * Job Descriptor and Shared Descriptors
361          * must all fit into the 64-word Descriptor h/w Buffer
362          */
            keys_fit_inline = false;
363         if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
364             ctx->split_key_pad_len + ctx->enckeylen <=
365             CAAM_DESC_BYTES_MAX)
366                 keys_fit_inline = true;
367
368         /* aead_givencrypt shared descriptor */
369         desc = ctx->sh_desc_givenc;
370
371         init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
372
373         /* Generate IV */
374         geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
375                 NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
376                 NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
377         append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
378                             LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
379         append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
380         append_move(desc, MOVE_SRC_INFIFO |
381                     MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
382         append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
383
384         /* Copy generated IV from class 1 context to the output fifo */
385         append_move(desc, MOVE_SRC_CLASS1CTX |
386                     MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));
387
388         /* Return to encryption */
389         append_operation(desc, ctx->class2_alg_type |
390                          OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
391
392         /* ivsize + cryptlen = seqoutlen - authsize */
393         append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
394
395         /* assoclen = seqinlen - (ivsize + cryptlen) */
396         append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
397
398         /* read assoc before reading payload */
399         append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
400                              KEY_VLF);
401
402         /* Copy iv from class 1 ctx to class 2 fifo */
403         moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
404                  NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
405         append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
406                             LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
407         append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
408                             LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
409
410         /* Class 1 operation */
411         append_operation(desc, ctx->class1_alg_type |
412                          OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
413
414         /* Will write ivsize + cryptlen */
415         append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
416
417         /* No need to reload iv */
418         append_seq_fifo_load(desc, tfm->ivsize,
419                              FIFOLD_CLASS_SKIP);
420
421         /* Will read cryptlen */
422         append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
423         aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
424
425         /* Write ICV */
426         append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
427                          LDST_SRCDST_BYTE_CONTEXT);
428
429         ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
430                                                  desc_bytes(desc),
431                                                  DMA_TO_DEVICE);
432         if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
433                 dev_err(jrdev, "unable to map shared descriptor\n");
434                 return -ENOMEM;
435         }
436 #ifdef DEBUG
437         print_hex_dump(KERN_ERR, "aead givenc shdesc@"xstr(__LINE__)": ",
438                        DUMP_PREFIX_ADDRESS, 16, 4, desc,
439                        desc_bytes(desc), 1);
440 #endif
441
442         return 0;
443 }
444
445 static int aead_setauthsize(struct crypto_aead *authenc,
446                                     unsigned int authsize)
447 {
448         struct caam_ctx *ctx = crypto_aead_ctx(authenc);
449
450         ctx->authsize = authsize;
451         aead_set_sh_desc(authenc);
452
453         return 0;
454 }
455
456 struct split_key_result {
457         struct completion completion;
458         int err;
459 };
460
461 static void split_key_done(struct device *dev, u32 *desc, u32 err,
462                            void *context)
463 {
464         struct split_key_result *res = context;
465
466 #ifdef DEBUG
467         dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
468 #endif
469
470         if (err) {
471                 char tmp[CAAM_ERROR_STR_MAX];
472
473                 dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
474         }
475
476         res->err = err;
477
478         complete(&res->completion);
479 }
480
481 /*
482  * get a split ipad/opad key
483  *
484  * Split key generation---------------------------------------------
485  *
486  * [00] 0xb0810008    jobdesc: stidx=1 share=never len=8
487  * [01] 0x04000014        key: class2->keyreg len=20
488  *                         @0xffe01000
489  * [03] 0x84410014  operation: cls2-op sha1 hmac init dec
490  * [04] 0x24940000     fifold: class2 msgdata-last2 len=0 imm
491  * [05] 0xa4000001       jump: class2 local all ->1 [06]
492  * [06] 0x64260028    fifostr: class2 mdsplit-jdk len=40
493  *                         @0xffe04000
494  */
495 static int gen_split_key(struct caam_ctx *ctx, const u8 *key_in, u32 authkeylen)
496 {
497         struct device *jrdev = ctx->jrdev;
498         u32 *desc;
499         struct split_key_result result;
500         dma_addr_t dma_addr_in, dma_addr_out;
501         int ret = 0;
502
503         desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
            if (!desc)
                    return -ENOMEM;
504
505         init_job_desc(desc, 0);
506
507         dma_addr_in = dma_map_single(jrdev, (void *)key_in, authkeylen,
508                                      DMA_TO_DEVICE);
509         if (dma_mapping_error(jrdev, dma_addr_in)) {
510                 dev_err(jrdev, "unable to map key input memory\n");
511                 kfree(desc);
512                 return -ENOMEM;
513         }
514         append_key(desc, dma_addr_in, authkeylen, CLASS_2 |
515                        KEY_DEST_CLASS_REG);
516
517         /* Sets MDHA up into an HMAC-INIT */
518         append_operation(desc, ctx->alg_op | OP_ALG_DECRYPT |
519                              OP_ALG_AS_INIT);
520
521         /*
522          * do a FIFO_LOAD of zero, this will trigger the internal key expansion
523          * into both pads inside MDHA
524          */
525         append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
526                                 FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
527
528         /*
529          * FIFO_STORE with the explicit split-key content store
530          * (0x26 output type)
531          */
532         dma_addr_out = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
533                                       DMA_FROM_DEVICE);
534         if (dma_mapping_error(jrdev, dma_addr_out)) {
535                 dev_err(jrdev, "unable to map key output memory\n");
536                 kfree(desc);
537                 return -ENOMEM;
538         }
539         append_fifo_store(desc, dma_addr_out, ctx->split_key_len,
540                           LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
541
542 #ifdef DEBUG
543         print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
544                        DUMP_PREFIX_ADDRESS, 16, 4, key_in, authkeylen, 1);
545         print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
546                        DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
547 #endif
548
549         result.err = 0;
550         init_completion(&result.completion);
551
552         ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
553         if (!ret) {
554                 /* in progress */
555                 wait_for_completion_interruptible(&result.completion);
556                 ret = result.err;
557 #ifdef DEBUG
558                 print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
559                                DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
560                                ctx->split_key_pad_len, 1);
561 #endif
562         }
563
564         dma_unmap_single(jrdev, dma_addr_out, ctx->split_key_pad_len,
565                          DMA_FROM_DEVICE);
566         dma_unmap_single(jrdev, dma_addr_in, authkeylen, DMA_TO_DEVICE);
567
568         kfree(desc);
569
570         return ret;
571 }
572
573 static int aead_setkey(struct crypto_aead *aead,
574                                const u8 *key, unsigned int keylen)
575 {
576         /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
577         static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
578         struct caam_ctx *ctx = crypto_aead_ctx(aead);
579         struct device *jrdev = ctx->jrdev;
580         struct rtattr *rta = (void *)key;
581         struct crypto_authenc_key_param *param;
582         unsigned int authkeylen;
583         unsigned int enckeylen;
584         int ret = 0;
585
586         param = RTA_DATA(rta);
587         enckeylen = be32_to_cpu(param->enckeylen);
588
589         key += RTA_ALIGN(rta->rta_len);
590         keylen -= RTA_ALIGN(rta->rta_len);
591
592         if (keylen < enckeylen)
593                 goto badkey;
594
595         authkeylen = keylen - enckeylen;
596
597         if (keylen > CAAM_MAX_KEY_SIZE)
598                 goto badkey;
599
600         /* Pick class 2 key length from algorithm submask */
601         ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
602                                       OP_ALG_ALGSEL_SHIFT] * 2;
603         ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
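            /*
             * Editorial example: for SHA1, mdpadlen[1] == 20, giving
             * split_key_len = 40 and split_key_pad_len = ALIGN(40, 16) = 48.
             */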
604
605 #ifdef DEBUG
606         printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
607                keylen, enckeylen, authkeylen);
608         printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
609                ctx->split_key_len, ctx->split_key_pad_len);
610         print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
611                        DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
612 #endif
613
614         ret = gen_split_key(ctx, key, authkeylen);
615         if (ret) {
616                 goto badkey;
617         }
618
619         /* append encryption key after the auth split key */
620         memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);
621
622         ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
623                                        enckeylen, DMA_TO_DEVICE);
624         if (dma_mapping_error(jrdev, ctx->key_dma)) {
625                 dev_err(jrdev, "unable to map key i/o memory\n");
626                 return -ENOMEM;
627         }
628 #ifdef DEBUG
629         print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
630                        DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
631                        ctx->split_key_pad_len + enckeylen, 1);
632 #endif
633
634         ctx->enckeylen = enckeylen;
635
636         ret = aead_set_sh_desc(aead);
637         if (ret) {
638                 dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
639                                  enckeylen, DMA_TO_DEVICE);
640         }
641
642         return ret;
643 badkey:
644         crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
645         return -EINVAL;
646 }
647
648 static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
649                              const u8 *key, unsigned int keylen)
650 {
651         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
652         struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
653         struct device *jrdev = ctx->jrdev;
654         int ret = 0;
655         u32 *key_jump_cmd, *jump_cmd;
656         u32 *desc;
657
658 #ifdef DEBUG
659         print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
660                        DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
661 #endif
662
663         memcpy(ctx->key, key, keylen);
664         ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
665                                       DMA_TO_DEVICE);
666         if (dma_mapping_error(jrdev, ctx->key_dma)) {
667                 dev_err(jrdev, "unable to map key i/o memory\n");
668                 return -ENOMEM;
669         }
670         ctx->enckeylen = keylen;
671
672         /* ablkcipher_encrypt shared descriptor */
673         desc = ctx->sh_desc_enc;
674         init_sh_desc(desc, HDR_SHARE_WAIT);
675         /* Skip if already shared */
676         key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
677                                    JUMP_COND_SHRD);
678
679         /* Load class1 key only */
680         append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
681                           ctx->enckeylen, CLASS_1 |
682                           KEY_DEST_CLASS_REG);
683
684         set_jump_tgt_here(desc, key_jump_cmd);
685
686         /* Propagate errors from shared to job descriptor */
687         append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
688
689         /* Load iv */
690         append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
691                    LDST_CLASS_1_CCB | tfm->ivsize);
692
693         /* Load operation */
694         append_operation(desc, ctx->class1_alg_type |
695                          OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
696
697         /* Perform operation */
698         ablkcipher_append_src_dst(desc);
699
700         ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
701                                               desc_bytes(desc),
702                                               DMA_TO_DEVICE);
703         if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
704                 dev_err(jrdev, "unable to map shared descriptor\n");
705                 return -ENOMEM;
706         }
707 #ifdef DEBUG
708         print_hex_dump(KERN_ERR, "ablkcipher enc shdesc@"xstr(__LINE__)": ",
709                        DUMP_PREFIX_ADDRESS, 16, 4, desc,
710                        desc_bytes(desc), 1);
711 #endif
712         /* ablkcipher_decrypt shared descriptor */
713         desc = ctx->sh_desc_dec;
714
715         init_sh_desc(desc, HDR_SHARE_WAIT);
716         /* Skip if already shared */
717         key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
718                                    JUMP_COND_SHRD);
719
720         /* Load class1 key only */
721         append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
722                           ctx->enckeylen, CLASS_1 |
723                           KEY_DEST_CLASS_REG);
724
725         /* Only propagate error immediately if shared */
726         jump_cmd = append_jump(desc, JUMP_TEST_ALL);
727         set_jump_tgt_here(desc, key_jump_cmd);
728         append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
729         set_jump_tgt_here(desc, jump_cmd);
730
731         /* load IV */
732         append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
733                    LDST_CLASS_1_CCB | tfm->ivsize);
734
735         /* Choose operation */
736         append_dec_op1(desc, ctx->class1_alg_type);
737
738         /* Perform operation */
739         ablkcipher_append_src_dst(desc);
740
741         /* Wait for key to load before allowing error propagation */
742         append_dec_shr_done(desc);
743
744         ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
745                                               desc_bytes(desc),
746                                               DMA_TO_DEVICE);
747         if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
748                 dev_err(jrdev, "unable to map shared descriptor\n");
749                 return -ENOMEM;
750         }
751
752 #ifdef DEBUG
753         print_hex_dump(KERN_ERR, "ablkcipher dec shdesc@"xstr(__LINE__)": ",
754                        DUMP_PREFIX_ADDRESS, 16, 4, desc,
755                        desc_bytes(desc), 1);
756 #endif
757
758         return ret;
759 }
760
761 struct link_tbl_entry {
762         u64 ptr;
763         u32 len;
764         u8 reserved;
765         u8 buf_pool_id;
766         u16 offset;
767 };
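/*
 * Editorial note: struct link_tbl_entry mirrors the h/w scatter/gather
 * ("link table") entry format; sg_to_link_tbl_last() below marks the
 * final entry by OR-ing 0x40000000 into len, on the assumption that
 * bit 30 of the length word is the h/w "final entry" flag.
 */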
768
769 /*
770  * aead_edesc - s/w-extended aead descriptor
771  * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
772  * @src_nents: number of segments in input scatterlist
773  * @dst_nents: number of segments in output scatterlist
774  * @iv_dma: dma address of iv for checking continuity and link table
775  * @link_tbl: s/w link table, dma-mapped to link_tbl_dma for the h/w
776  * @link_tbl_bytes: length of dma mapped link_tbl space
777  * @link_tbl_dma: bus physical mapped address of h/w link table
778  * @hw_desc: the h/w job descriptor followed by any referenced link tables
779  */
780 struct aead_edesc {
781         int assoc_nents;
782         int src_nents;
783         int dst_nents;
784         dma_addr_t iv_dma;
785         int link_tbl_bytes;
786         dma_addr_t link_tbl_dma;
787         struct link_tbl_entry *link_tbl;
788         u32 hw_desc[0];
789 };
790
791 /*
792  * ablkcipher_edesc - s/w-extended ablkcipher descriptor
793  * @src_nents: number of segments in input scatterlist
794  * @dst_nents: number of segments in output scatterlist
795  * @iv_dma: dma address of iv for checking continuity and link table
796  * @link_tbl: s/w link table, dma-mapped to link_tbl_dma for the h/w
797  * @link_tbl_bytes: length of dma mapped link_tbl space
798  * @link_tbl_dma: bus physical mapped address of h/w link table
799  * @hw_desc: the h/w job descriptor followed by any referenced link tables
800  */
801 struct ablkcipher_edesc {
802         int src_nents;
803         int dst_nents;
804         dma_addr_t iv_dma;
805         int link_tbl_bytes;
806         dma_addr_t link_tbl_dma;
807         struct link_tbl_entry *link_tbl;
808         u32 hw_desc[0];
809 };
810
811 static void caam_unmap(struct device *dev, struct scatterlist *src,
812                        struct scatterlist *dst, int src_nents, int dst_nents,
813                        dma_addr_t iv_dma, int ivsize, dma_addr_t link_tbl_dma,
814                        int link_tbl_bytes)
815 {
816         if (unlikely(dst != src)) {
817                 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
818                 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
819         } else {
820                 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
821         }
822
823         if (iv_dma)
824                 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
825         if (link_tbl_bytes)
826                 dma_unmap_single(dev, link_tbl_dma, link_tbl_bytes,
827                                  DMA_TO_DEVICE);
828 }
829
830 static void aead_unmap(struct device *dev,
831                        struct aead_edesc *edesc,
832                        struct aead_request *req)
833 {
834         struct crypto_aead *aead = crypto_aead_reqtfm(req);
835         int ivsize = crypto_aead_ivsize(aead);
836
837         dma_unmap_sg(dev, req->assoc, edesc->assoc_nents, DMA_TO_DEVICE);
838
839         caam_unmap(dev, req->src, req->dst,
840                    edesc->src_nents, edesc->dst_nents,
841                    edesc->iv_dma, ivsize, edesc->link_tbl_dma,
842                    edesc->link_tbl_bytes);
843 }
844
845 static void ablkcipher_unmap(struct device *dev,
846                              struct ablkcipher_edesc *edesc,
847                              struct ablkcipher_request *req)
848 {
849         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
850         int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
851
852         caam_unmap(dev, req->src, req->dst,
853                    edesc->src_nents, edesc->dst_nents,
854                    edesc->iv_dma, ivsize, edesc->link_tbl_dma,
855                    edesc->link_tbl_bytes);
856 }
857
858 static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
859                                    void *context)
860 {
861         struct aead_request *req = context;
862         struct aead_edesc *edesc;
863 #ifdef DEBUG
864         struct crypto_aead *aead = crypto_aead_reqtfm(req);
865         struct caam_ctx *ctx = crypto_aead_ctx(aead);
866         int ivsize = crypto_aead_ivsize(aead);
867
868         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
869 #endif
870
871         edesc = (struct aead_edesc *)((char *)desc -
872                  offsetof(struct aead_edesc, hw_desc));
873
874         if (err) {
875                 char tmp[CAAM_ERROR_STR_MAX];
876
877                 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
878         }
879
880         aead_unmap(jrdev, edesc, req);
881
882 #ifdef DEBUG
883         print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
884                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
885                        req->assoclen, 1);
886         print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
887                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
888                        edesc->src_nents ? 100 : ivsize, 1);
889         print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
890                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
891                        edesc->src_nents ? 100 : req->cryptlen +
892                        ctx->authsize + 4, 1);
893 #endif
894
895         kfree(edesc);
896
897         aead_request_complete(req, err);
898 }
899
900 static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
901                                    void *context)
902 {
903         struct aead_request *req = context;
904         struct aead_edesc *edesc;
905 #ifdef DEBUG
906         struct crypto_aead *aead = crypto_aead_reqtfm(req);
907         struct caam_ctx *ctx = crypto_aead_ctx(aead);
908         int ivsize = crypto_aead_ivsize(aead);
909
910         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
911 #endif
912
913         edesc = (struct aead_edesc *)((char *)desc -
914                  offsetof(struct aead_edesc, hw_desc));
915
916 #ifdef DEBUG
917         print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
918                        DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
919                        ivsize, 1);
920         print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
921                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
922                        req->cryptlen, 1);
923 #endif
924
925         if (err) {
926                 char tmp[CAAM_ERROR_STR_MAX];
927
928                 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
929         }
930
931         aead_unmap(jrdev, edesc, req);
932
933         /*
934          * verify that the h/w ICV check passed; otherwise return -EBADMSG
935          */
936         if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
937                 err = -EBADMSG;
938
939 #ifdef DEBUG
940         print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ",
941                        DUMP_PREFIX_ADDRESS, 16, 4,
942                        ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
943                        sizeof(struct iphdr) + req->assoclen +
944                        ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
945                        ctx->authsize + 36, 1);
946         if (!err && edesc->link_tbl_bytes) {
947                 struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
948                 print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
949                                DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
950                         sg->length + ctx->authsize + 16, 1);
951         }
952 #endif
953
954         kfree(edesc);
955
956         aead_request_complete(req, err);
957 }
958
959 static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
960                                    void *context)
961 {
962         struct ablkcipher_request *req = context;
963         struct ablkcipher_edesc *edesc;
964 #ifdef DEBUG
965         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
966         int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
967
968         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
969 #endif
970
971         edesc = (struct ablkcipher_edesc *)((char *)desc -
972                  offsetof(struct ablkcipher_edesc, hw_desc));
973
974         if (err) {
975                 char tmp[CAAM_ERROR_STR_MAX];
976
977                 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
978         }
979
980 #ifdef DEBUG
981         print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
982                        DUMP_PREFIX_ADDRESS, 16, 4, req->info,
983                        edesc->src_nents > 1 ? 100 : ivsize, 1);
984         print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
985                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
986                        edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
987 #endif
988
989         ablkcipher_unmap(jrdev, edesc, req);
990         kfree(edesc);
991
992         ablkcipher_request_complete(req, err);
993 }
994
995 static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
996                                     void *context)
997 {
998         struct ablkcipher_request *req = context;
999         struct ablkcipher_edesc *edesc;
1000 #ifdef DEBUG
1001         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1002         int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1003
1004         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1005 #endif
1006
1007         edesc = (struct ablkcipher_edesc *)((char *)desc -
1008                  offsetof(struct ablkcipher_edesc, hw_desc));
1009         if (err) {
1010                 char tmp[CAAM_ERROR_STR_MAX];
1011
1012                 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
1013         }
1014
1015 #ifdef DEBUG
1016         print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
1017                        DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1018                        ivsize, 1);
1019         print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
1020                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1021                        edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
1022 #endif
1023
1024         ablkcipher_unmap(jrdev, edesc, req);
1025         kfree(edesc);
1026
1027         ablkcipher_request_complete(req, err);
1028 }
1029
1030 static void sg_to_link_tbl_one(struct link_tbl_entry *link_tbl_ptr,
1031                                dma_addr_t dma, u32 len, u32 offset)
1032 {
1033         link_tbl_ptr->ptr = dma;
1034         link_tbl_ptr->len = len;
1035         link_tbl_ptr->reserved = 0;
1036         link_tbl_ptr->buf_pool_id = 0;
1037         link_tbl_ptr->offset = offset;
1038 #ifdef DEBUG
1039         print_hex_dump(KERN_ERR, "link_tbl_ptr@"xstr(__LINE__)": ",
1040                        DUMP_PREFIX_ADDRESS, 16, 4, link_tbl_ptr,
1041                        sizeof(struct link_tbl_entry), 1);
1042 #endif
1043 }
1044
1045 /*
1046  * convert scatterlist to h/w link table format
1047  * without setting the final bit; instead, returns the last entry
1048  */
1049 static struct link_tbl_entry *sg_to_link_tbl(struct scatterlist *sg,
1050                                              int sg_count, struct link_tbl_entry
1051                                              *link_tbl_ptr, u32 offset)
1052 {
1053         while (sg_count) {
1054                 sg_to_link_tbl_one(link_tbl_ptr, sg_dma_address(sg),
1055                                    sg_dma_len(sg), offset);
1056                 link_tbl_ptr++;
1057                 sg = sg_next(sg);
1058                 sg_count--;
1059         }
1060         return link_tbl_ptr - 1;
1061 }
1062
1063 /*
1064  * convert scatterlist to h/w link table format
1065  * scatterlist must have been previously dma mapped
1066  */
1067 static void sg_to_link_tbl_last(struct scatterlist *sg, int sg_count,
1068                                 struct link_tbl_entry *link_tbl_ptr, u32 offset)
1069 {
1070         link_tbl_ptr = sg_to_link_tbl(sg, sg_count, link_tbl_ptr, offset);
1071         link_tbl_ptr->len |= 0x40000000;
1072 }
1073
1074 /*
1075  * Fill in aead job descriptor
1076  */
1077 static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
1078                           struct aead_edesc *edesc,
1079                           struct aead_request *req,
1080                           bool all_contig, bool encrypt)
1081 {
1082         struct crypto_aead *aead = crypto_aead_reqtfm(req);
1083         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1084         int ivsize = crypto_aead_ivsize(aead);
1085         int authsize = ctx->authsize;
1086         u32 *desc = edesc->hw_desc;
1087         u32 out_options = 0, in_options;
1088         dma_addr_t dst_dma, src_dma;
1089         int len, link_tbl_index = 0;
1090
1091 #ifdef DEBUG
1092         debug("assoclen %d cryptlen %d authsize %d\n",
1093               req->assoclen, req->cryptlen, authsize);
1094         print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
1095                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
1096                        req->assoclen, 1);
1097         print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
1098                        DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1099                        edesc->src_nents ? 100 : ivsize, 1);
1100         print_hex_dump(KERN_ERR, "src    @"xstr(__LINE__)": ",
1101                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1102                         edesc->src_nents ? 100 : req->cryptlen, 1);
1103         print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
1104                        DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
1105                        desc_bytes(sh_desc), 1);
1106 #endif
1107
1108         len = desc_len(sh_desc);
1109         init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1110
1111         if (all_contig) {
1112                 src_dma = sg_dma_address(req->assoc);
1113                 in_options = 0;
1114         } else {
1115                 src_dma = edesc->link_tbl_dma;
1116                 link_tbl_index += (edesc->assoc_nents ? : 1) + 1 +
1117                                   (edesc->src_nents ? : 1);
1118                 in_options = LDST_SGF;
1119         }
1120         if (encrypt)
1121                 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
1122                                   req->cryptlen - authsize, in_options);
1123         else
1124                 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
1125                                   req->cryptlen, in_options);
1126
1127         if (likely(req->src == req->dst)) {
1128                 if (all_contig) {
1129                         dst_dma = sg_dma_address(req->src);
1130                 } else {
1131                         dst_dma = src_dma + sizeof(struct link_tbl_entry) *
1132                                   ((edesc->assoc_nents ? : 1) + 1);
1133                         out_options = LDST_SGF;
1134                 }
1135         } else {
1136                 if (!edesc->dst_nents) {
1137                         dst_dma = sg_dma_address(req->dst);
1138                 } else {
1139                         dst_dma = edesc->link_tbl_dma +
1140                                   link_tbl_index *
1141                                   sizeof(struct link_tbl_entry);
1142                         out_options = LDST_SGF;
1143                 }
1144         }
1145         if (encrypt)
1146                 append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
1147         else
1148                 append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
1149                                    out_options);
1150 }
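/*
 * Editorial summary: the job descriptor built above is exactly the
 * "Header / ShareDesc pointer / SEQ pointers" layout sketched at the
 * top of this file: the input sequence covers assoc + iv + payload and
 * the output sequence covers the payload (plus the ICV on encrypt).
 */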
1151
1152 /*
1153  * Fill in aead givencrypt job descriptor
1154  */
1155 static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
1156                               struct aead_edesc *edesc,
1157                               struct aead_request *req,
1158                               int contig)
1159 {
1160         struct crypto_aead *aead = crypto_aead_reqtfm(req);
1161         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1162         int ivsize = crypto_aead_ivsize(aead);
1163         int authsize = ctx->authsize;
1164         u32 *desc = edesc->hw_desc;
1165         u32 out_options = 0, in_options;
1166         dma_addr_t dst_dma, src_dma;
1167         int len, link_tbl_index = 0;
1168
1169 #ifdef DEBUG
1170         debug("assoclen %d cryptlen %d authsize %d\n",
1171               req->assoclen, req->cryptlen, authsize);
1172         print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
1173                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
1174                        req->assoclen, 1);
1175         print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
1176                        DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
1177         print_hex_dump(KERN_ERR, "src    @"xstr(__LINE__)": ",
1178                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1179                         edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
1180         print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
1181                        DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
1182                        desc_bytes(sh_desc), 1);
1183 #endif
1184
1185         len = desc_len(sh_desc);
1186         init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1187
1188         if (contig & GIV_SRC_CONTIG) {
1189                 src_dma = sg_dma_address(req->assoc);
1190                 in_options = 0;
1191         } else {
1192                 src_dma = edesc->link_tbl_dma;
1193                 link_tbl_index += edesc->assoc_nents + 1 + edesc->src_nents;
1194                 in_options = LDST_SGF;
1195         }
1196         append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
1197                           req->cryptlen - authsize, in_options);
1198
1199         if (contig & GIV_DST_CONTIG) {
1200                 dst_dma = edesc->iv_dma;
1201         } else {
1202                 if (likely(req->src == req->dst)) {
1203                         dst_dma = src_dma + sizeof(struct link_tbl_entry) *
1204                                   edesc->assoc_nents;
1205                         out_options = LDST_SGF;
1206                 } else {
1207                         dst_dma = edesc->link_tbl_dma +
1208                                   link_tbl_index *
1209                                   sizeof(struct link_tbl_entry);
1210                         out_options = LDST_SGF;
1211                 }
1212         }
1213
1214         append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
1215 }
1216
1217 /*
1218  * Fill in ablkcipher job descriptor
1219  */
1220 static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
1221                                 struct ablkcipher_edesc *edesc,
1222                                 struct ablkcipher_request *req,
1223                                 bool iv_contig)
1224 {
1225         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1226         int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1227         u32 *desc = edesc->hw_desc;
1228         u32 out_options = 0, in_options;
1229         dma_addr_t dst_dma, src_dma;
1230         int len, link_tbl_index = 0;
1231
1232 #ifdef DEBUG
1233         print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
1234                        DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1235                        ivsize, 1);
1236         print_hex_dump(KERN_ERR, "src    @"xstr(__LINE__)": ",
1237                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1238                        edesc->src_nents ? 100 : req->nbytes, 1);
1239 #endif
1240
1241         len = desc_len(sh_desc);
1242         init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1243
1244         if (iv_contig) {
1245                 src_dma = edesc->iv_dma;
1246                 in_options = 0;
1247         } else {
1248                 src_dma = edesc->link_tbl_dma;
1249                 link_tbl_index += (iv_contig ? 0 : 1) + edesc->src_nents;
1250                 in_options = LDST_SGF;
1251         }
1252         append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
1253
1254         if (likely(req->src == req->dst)) {
1255                 if (!edesc->src_nents && iv_contig) {
1256                         dst_dma = sg_dma_address(req->src);
1257                 } else {
1258                         dst_dma = edesc->link_tbl_dma +
1259                                 sizeof(struct link_tbl_entry);
1260                         out_options = LDST_SGF;
1261                 }
1262         } else {
1263                 if (!edesc->dst_nents) {
1264                         dst_dma = sg_dma_address(req->dst);
1265                 } else {
1266                         dst_dma = edesc->link_tbl_dma +
1267                                 link_tbl_index * sizeof(struct link_tbl_entry);
1268                         out_options = LDST_SGF;
1269                 }
1270         }
1271         append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
1272 }
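/*
 * Editorial note: for ablkcipher the input sequence is always
 * [ iv | payload ] (hence req->nbytes + ivsize above) and the output
 * sequence just the payload; when a link table is used, it holds one
 * entry for the iv followed by the src entries.
 */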
1273
1274 /*
1275  * derive number of elements in scatterlist
1276  */
1277 static int sg_count(struct scatterlist *sg_list, int nbytes)
1278 {
1279         struct scatterlist *sg = sg_list;
1280         int sg_nents = 0;
1281
1282         while (nbytes > 0) {
1283                 sg_nents++;
1284                 nbytes -= sg->length;
1285                 if (!sg_is_last(sg) && (sg + 1)->length == 0)
1286                         BUG(); /* chaining not supported */
1287                 sg = scatterwalk_sg_next(sg);
1288         }
1289
1290         if (likely(sg_nents == 1))
1291                 return 0;
1292
1293         return sg_nents;
1294 }
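/*
 * Editorial example: data spanning two scatterlist entries makes
 * sg_count() return 2 (a link table is required), while data contained
 * in a single entry returns 0 rather than 1, as a "no link table
 * needed" marker.
 */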
1295
1296 /*
1297  * allocate and map the aead extended descriptor
1298  */
1299 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1300                                            int desc_bytes, bool *all_contig_ptr)
1301 {
1302         struct crypto_aead *aead = crypto_aead_reqtfm(req);
1303         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1304         struct device *jrdev = ctx->jrdev;
1305         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1306                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1307         int assoc_nents, src_nents, dst_nents = 0;
1308         struct aead_edesc *edesc;
1309         dma_addr_t iv_dma = 0;
1310         int sgc;
1311         bool all_contig = true;
1312         int ivsize = crypto_aead_ivsize(aead);
1313         int link_tbl_index, link_tbl_len = 0, link_tbl_bytes;
1314
1315         assoc_nents = sg_count(req->assoc, req->assoclen);
1316         src_nents = sg_count(req->src, req->cryptlen);
1317
1318         if (unlikely(req->dst != req->src))
1319                 dst_nents = sg_count(req->dst, req->cryptlen);
1320
1321         sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1,
1322                          DMA_BIDIRECTIONAL);
1323         if (likely(req->src == req->dst)) {
1324                 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
1325                                  DMA_BIDIRECTIONAL);
1326         } else {
1327                 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
1328                                  DMA_TO_DEVICE);
1329                 sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
1330                                  DMA_FROM_DEVICE);
1331         }
1332
1333         /* Check if data are contiguous */
1334         iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
1335         if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
1336             iv_dma || src_nents || iv_dma + ivsize !=
1337             sg_dma_address(req->src)) {
1338                 all_contig = false;
1339                 assoc_nents = assoc_nents ? : 1;
1340                 src_nents = src_nents ? : 1;
1341                 link_tbl_len = assoc_nents + 1 + src_nents;
1342         }
1343         link_tbl_len += dst_nents;
1344
1345         link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry);
1346
1347         /* allocate space for base edesc and hw desc commands, link tables */
1348         edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
1349                         link_tbl_bytes, GFP_DMA | flags);
1350         if (!edesc) {
1351                 dev_err(jrdev, "could not allocate extended descriptor\n");
1352                 return ERR_PTR(-ENOMEM);
1353         }
1354
1355         edesc->assoc_nents = assoc_nents;
1356         edesc->src_nents = src_nents;
1357         edesc->dst_nents = dst_nents;
1358         edesc->iv_dma = iv_dma;
1359         edesc->link_tbl_bytes = link_tbl_bytes;
1360         edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) +
1361                           desc_bytes;
1362         edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
1363                                              link_tbl_bytes, DMA_TO_DEVICE);
1364         *all_contig_ptr = all_contig;
1365
1366         link_tbl_index = 0;
1367         if (!all_contig) {
1368                 sg_to_link_tbl(req->assoc,
1369                                (assoc_nents ? : 1),
1370                                edesc->link_tbl +
1371                                link_tbl_index, 0);
1372                 link_tbl_index += assoc_nents ? : 1;
1373                 sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
1374                                    iv_dma, ivsize, 0);
1375                 link_tbl_index += 1;
1376                 sg_to_link_tbl_last(req->src,
1377                                     (src_nents ? : 1),
1378                                     edesc->link_tbl +
1379                                     link_tbl_index, 0);
1380                 link_tbl_index += src_nents ? : 1;
1381         }
1382         if (dst_nents) {
1383                 sg_to_link_tbl_last(req->dst, dst_nents,
1384                                     edesc->link_tbl + link_tbl_index, 0);
1385         }
1386
1387         return edesc;
1388 }
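
     /*
      * Illustrative sketch of the table built above, not extra driver
      * logic: for a non-contiguous request with a two-segment assoc,
      * one-segment src and separate two-segment dst, the link table is
      *
      *   [0..1] assoc segments
      *   [2]    iv entry (from iv_dma)
      *   [3]    src segment (closed with sg_to_link_tbl_last)
      *   [4..5] dst segments (closed likewise)
      *
      * Each struct link_tbl_entry carries one DMA address/length pair,
      * so the job descriptor can reference the whole request through a
      * single pointer to this table.
      */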
1389
1390 static int aead_encrypt(struct aead_request *req)
1391 {
1392         struct aead_edesc *edesc;
1393         struct crypto_aead *aead = crypto_aead_reqtfm(req);
1394         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1395         struct device *jrdev = ctx->jrdev;
1396         bool all_contig;
1397         u32 *desc;
1398         int ret = 0;
1399
1400         req->cryptlen += ctx->authsize;
1401
1402         /* allocate extended descriptor */
1403         edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
1404                                  CAAM_CMD_SZ, &all_contig);
1405         if (IS_ERR(edesc))
1406                 return PTR_ERR(edesc);
1407
1408         /* Create and submit job descriptor */
1409         init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
1410                       all_contig, true);
1411 #ifdef DEBUG
1412         print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
1413                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1414                        desc_bytes(edesc->hw_desc), 1);
1415 #endif
1416
1417         desc = edesc->hw_desc;
1418         ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
1419         if (!ret) {
1420                 ret = -EINPROGRESS;
1421         } else {
1422                 aead_unmap(jrdev, edesc, req);
1423                 kfree(edesc);
1424         }
1425
1426         return ret;
1427 }
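
     /*
      * Hedged usage sketch, not driver code: roughly how a caller of
      * this era reaches aead_encrypt() through the crypto API. The key
      * blob, scatterlists, iv and completion callback (key, assoc_sg,
      * src_sg, dst_sg, iv, done_cb) are the caller's and are named here
      * only for illustration.
      *
      *   struct crypto_aead *tfm;
      *   struct aead_request *req;
      *   int err;
      *
      *   tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
      *   if (IS_ERR(tfm))
      *           return PTR_ERR(tfm);
      *   crypto_aead_setkey(tfm, key, keylen);
      *   crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
      *   req = aead_request_alloc(tfm, GFP_KERNEL);
      *   aead_request_set_callback(req, 0, done_cb, NULL);
      *   aead_request_set_assoc(req, assoc_sg, assoclen);
      *   aead_request_set_crypt(req, src_sg, dst_sg, cryptlen, iv);
      *   err = crypto_aead_encrypt(req);  (-EINPROGRESS once queued)
      */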
1428
1429 static int aead_decrypt(struct aead_request *req)
1430 {
1431         struct aead_edesc *edesc;
1432         struct crypto_aead *aead = crypto_aead_reqtfm(req);
1433         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1434         struct device *jrdev = ctx->jrdev;
1435         bool all_contig;
1436         u32 *desc;
1437         int ret = 0;
1438
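             /*
              * Unlike aead_encrypt(), no authsize is added here: for
              * decryption, req->cryptlen as supplied by the caller
              * already includes the ICV appended to the ciphertext.
              */
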
1439         /* allocate extended descriptor */
1440         edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
1441                                  CAAM_CMD_SZ, &all_contig);
1442         if (IS_ERR(edesc))
1443                 return PTR_ERR(edesc);
1444
1445 #ifdef DEBUG
1446         print_hex_dump(KERN_ERR, "dec src@"xstr(__LINE__)": ",
1447                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1448                        req->cryptlen, 1);
1449 #endif
1450
1451         /* Create and submit job descriptor */
1452         init_aead_job(ctx->sh_desc_dec,
1453                       ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
1454 #ifdef DEBUG
1455         print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
1456                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1457                        desc_bytes(edesc->hw_desc), 1);
1458 #endif
1459
1460         desc = edesc->hw_desc;
1461         ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
1462         if (!ret) {
1463                 ret = -EINPROGRESS;
1464         } else {
1465                 aead_unmap(jrdev, edesc, req);
1466                 kfree(edesc);
1467         }
1468
1469         return ret;
1470 }
1471
1472 /*
1473  * allocate and map the aead extended descriptor for aead givencrypt
1474  */
1475 static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
1476                                                *greq, int desc_bytes,
1477                                                u32 *contig_ptr)
1478 {
1479         struct aead_request *req = &greq->areq;
1480         struct crypto_aead *aead = crypto_aead_reqtfm(req);
1481         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1482         struct device *jrdev = ctx->jrdev;
1483         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1484                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1485         int assoc_nents, src_nents, dst_nents = 0;
1486         struct aead_edesc *edesc;
1487         dma_addr_t iv_dma = 0;
1488         int sgc;
1489         u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
1490         int ivsize = crypto_aead_ivsize(aead);
1491         int link_tbl_index, link_tbl_len = 0, link_tbl_bytes;
1492
1493         assoc_nents = sg_count(req->assoc, req->assoclen);
1494         src_nents = sg_count(req->src, req->cryptlen);
1495
1496         if (unlikely(req->dst != req->src))
1497                 dst_nents = sg_count(req->dst, req->cryptlen);
1498
1499         sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1,
1500                          DMA_BIDIRECTIONAL);
1501         if (likely(req->src == req->dst)) {
1502                 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
1503                                  DMA_BIDIRECTIONAL);
1504         } else {
1505                 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
1506                                  DMA_TO_DEVICE);
1507                 sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
1508                                  DMA_FROM_DEVICE);
1509         }
1510
1511         /* Check if data are contiguous */
1512         iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
1513         if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
1514             iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
1515                 contig &= ~GIV_SRC_CONTIG;
1516         if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst)) {
1517                 contig &= ~GIV_DST_CONTIG;
1518                 if (unlikely(req->src != req->dst)) {
1519                         dst_nents = dst_nents ? : 1;
1520                         link_tbl_len += 1;
1521                 }
             }
1522         if (!(contig & GIV_SRC_CONTIG)) {
1523                 assoc_nents = assoc_nents ? : 1;
1524                 src_nents = src_nents ? : 1;
1525                 link_tbl_len += assoc_nents + 1 + src_nents;
1526                 if (likely(req->src == req->dst))
1527                         contig &= ~GIV_DST_CONTIG;
1528         }
1529         link_tbl_len += dst_nents;
1530
1531         link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry);
1532
1533         /* allocate space for base edesc and hw desc commands, link tables */
1534         edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
1535                         link_tbl_bytes, GFP_DMA | flags);
1536         if (!edesc) {
1537                 dev_err(jrdev, "could not allocate extended descriptor\n");
1538                 return ERR_PTR(-ENOMEM);
1539         }
1540
1541         edesc->assoc_nents = assoc_nents;
1542         edesc->src_nents = src_nents;
1543         edesc->dst_nents = dst_nents;
1544         edesc->iv_dma = iv_dma;
1545         edesc->link_tbl_bytes = link_tbl_bytes;
1546         edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) +
1547                           desc_bytes;
1548         edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
1549                                              link_tbl_bytes, DMA_TO_DEVICE);
1550         *contig_ptr = contig;
1551
1552         link_tbl_index = 0;
1553         if (!(contig & GIV_SRC_CONTIG)) {
1554                 sg_to_link_tbl(req->assoc, assoc_nents,
1555                                edesc->link_tbl +
1556                                link_tbl_index, 0);
1557                 link_tbl_index += assoc_nents;
1558                 sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
1559                                    iv_dma, ivsize, 0);
1560                 link_tbl_index += 1;
1561                 sg_to_link_tbl_last(req->src, src_nents,
1562                                     edesc->link_tbl +
1563                                     link_tbl_index, 0);
1564                 link_tbl_index += src_nents;
1565         }
1566         if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
1567                 sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
1568                                    iv_dma, ivsize, 0);
1569                 link_tbl_index += 1;
1570                 sg_to_link_tbl_last(req->dst, dst_nents,
1571                                     edesc->link_tbl + link_tbl_index, 0);
1572         }
1573
1574         return edesc;
1575 }
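
     /*
      * Note on the contig mask above, summarizing the logic rather than
      * adding to it: bits start as GIV_SRC_CONTIG | GIV_DST_CONTIG and
      * are cleared independently. For example, an in-place request
      * (src == dst) with a two-segment src clears both bits and yields
      * a table of assoc_nents + 1 + src_nents entries, while an
      * out-of-place request may keep GIV_DST_CONTIG even though the
      * source side needs a table; init_aead_giv_job() is expected to
      * choose direct pointers or table references per bit.
      */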
1576
1577 static int aead_givencrypt(struct aead_givcrypt_request *areq)
1578 {
1579         struct aead_request *req = &areq->areq;
1580         struct aead_edesc *edesc;
1581         struct crypto_aead *aead = crypto_aead_reqtfm(req);
1582         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1583         struct device *jrdev = ctx->jrdev;
1584         u32 contig;
1585         u32 *desc;
1586         int ret = 0;
1587
1588         req->cryptlen += ctx->authsize;
1589
1590         /* allocate extended descriptor */
1591         edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
1592                                      CAAM_CMD_SZ, &contig);
1594         if (IS_ERR(edesc))
1595                 return PTR_ERR(edesc);
1596
1597 #ifdef DEBUG
1598         print_hex_dump(KERN_ERR, "giv src@"xstr(__LINE__)": ",
1599                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1600                        req->cryptlen, 1);
1601 #endif
1602
1603         /* Create and submit job descriptor */
1604         init_aead_giv_job(ctx->sh_desc_givenc,
1605                           ctx->sh_desc_givenc_dma, edesc, req, contig);
1606 #ifdef DEBUG
1607         print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
1608                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1609                        desc_bytes(edesc->hw_desc), 1);
1610 #endif
1611
1612         desc = edesc->hw_desc;
1613         ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
1614         if (!ret) {
1615                 ret = -EINPROGRESS;
1616         } else {
1617                 aead_unmap(jrdev, edesc, req);
1618                 kfree(edesc);
1619         }
1620
1621         return ret;
1622 }
1623
1624 /*
1625  * allocate and map the ablkcipher extended descriptor for ablkcipher
1626  */
1627 static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1628                                                        *req, int desc_bytes,
1629                                                        bool *iv_contig_out)
1630 {
1631         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1632         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1633         struct device *jrdev = ctx->jrdev;
1634         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1635                                           CRYPTO_TFM_REQ_MAY_SLEEP)) ?
1636                        GFP_KERNEL : GFP_ATOMIC;
1637         int src_nents, dst_nents = 0, link_tbl_bytes;
1638         struct ablkcipher_edesc *edesc;
1639         dma_addr_t iv_dma = 0;
1640         bool iv_contig = false;
1641         int sgc;
1642         int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1643         int link_tbl_index;
1644
1645         src_nents = sg_count(req->src, req->nbytes);
1646
1647         if (unlikely(req->dst != req->src))
1648                 dst_nents = sg_count(req->dst, req->nbytes);
1649
1650         if (likely(req->src == req->dst)) {
1651                 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
1652                                  DMA_BIDIRECTIONAL);
1653         } else {
1654                 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
1655                                  DMA_TO_DEVICE);
1656                 sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
1657                                  DMA_FROM_DEVICE);
1658         }
1659
1660         /*
1661          * Check if iv can be contiguous with source and destination.
1662          * If so, include it. If not, create scatterlist.
1663          */
1664         iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
1665         if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
1666                 iv_contig = true;
1667         else
1668                 src_nents = src_nents ? : 1;
1669         link_tbl_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
1670                          sizeof(struct link_tbl_entry);
1671
1672         /* allocate space for base edesc and hw desc commands, link tables */
1673         edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
1674                         link_tbl_bytes, GFP_DMA | flags);
1675         if (!edesc) {
1676                 dev_err(jrdev, "could not allocate extended descriptor\n");
1677                 return ERR_PTR(-ENOMEM);
1678         }
1679
1680         edesc->src_nents = src_nents;
1681         edesc->dst_nents = dst_nents;
1682         edesc->link_tbl_bytes = link_tbl_bytes;
1683         edesc->link_tbl = (void *)edesc + sizeof(struct ablkcipher_edesc) +
1684                           desc_bytes;
1685
1686         link_tbl_index = 0;
1687         if (!iv_contig) {
1688                 sg_to_link_tbl_one(edesc->link_tbl, iv_dma, ivsize, 0);
1689                 sg_to_link_tbl_last(req->src, src_nents,
1690                                     edesc->link_tbl + 1, 0);
1691                 link_tbl_index += 1 + src_nents;
1692         }
1693
1694         if (unlikely(dst_nents)) {
1695                 sg_to_link_tbl_last(req->dst, dst_nents,
1696                                     edesc->link_tbl + link_tbl_index, 0);
1697         }
1698
1699         edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
1700                                              link_tbl_bytes, DMA_TO_DEVICE);
1701         edesc->iv_dma = iv_dma;
1702
1703 #ifdef DEBUG
1704         print_hex_dump(KERN_ERR, "ablkcipher link_tbl@"xstr(__LINE__)": ",
1705                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl,
1706                        link_tbl_bytes, 1);
1707 #endif
1708
1709         *iv_contig_out = iv_contig;
1710         return edesc;
1711 }
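
     /*
      * Illustrative sketch, mirroring the aead case minus the assoc
      * entries: when the iv is not contiguous with a one-segment src,
      * the table above is
      *
      *   [0]      iv entry (from iv_dma)
      *   [1..N]   src segments (closed with sg_to_link_tbl_last)
      *   [N+1..]  dst segments, present only for out-of-place requests
      */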
1712
1713 static int ablkcipher_encrypt(struct ablkcipher_request *req)
1714 {
1715         struct ablkcipher_edesc *edesc;
1716         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1717         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1718         struct device *jrdev = ctx->jrdev;
1719         bool iv_contig;
1720         u32 *desc;
1721         int ret = 0;
1722
1723         /* allocate extended descriptor */
1724         edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
1725                                        CAAM_CMD_SZ, &iv_contig);
1726         if (IS_ERR(edesc))
1727                 return PTR_ERR(edesc);
1728
1729         /* Create and submit job descriptor */
1730         init_ablkcipher_job(ctx->sh_desc_enc,
1731                             ctx->sh_desc_enc_dma, edesc, req, iv_contig);
1732 #ifdef DEBUG
1733         print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
1734                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1735                        desc_bytes(edesc->hw_desc), 1);
1736 #endif
1737         desc = edesc->hw_desc;
1738         ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
1739
1740         if (!ret) {
1741                 ret = -EINPROGRESS;
1742         } else {
1743                 ablkcipher_unmap(jrdev, edesc, req);
1744                 kfree(edesc);
1745         }
1746
1747         return ret;
1748 }
1749
1750 static int ablkcipher_decrypt(struct ablkcipher_request *req)
1751 {
1752         struct ablkcipher_edesc *edesc;
1753         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1754         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1755         struct device *jrdev = ctx->jrdev;
1756         bool iv_contig;
1757         u32 *desc;
1758         int ret = 0;
1759
1760         /* allocate extended descriptor */
1761         edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
1762                                        CAAM_CMD_SZ, &iv_contig);
1763         if (IS_ERR(edesc))
1764                 return PTR_ERR(edesc);
1765
1766         /* Create and submit job descriptor */
1767         init_ablkcipher_job(ctx->sh_desc_dec,
1768                             ctx->sh_desc_dec_dma, edesc, req, iv_contig);
1769         desc = edesc->hw_desc;
1770 #ifdef DEBUG
1771         print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
1772                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1773                        desc_bytes(edesc->hw_desc), 1);
1774 #endif
1775
1776         ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
1777         if (!ret) {
1778                 ret = -EINPROGRESS;
1779         } else {
1780                 ablkcipher_unmap(jrdev, edesc, req);
1781                 kfree(edesc);
1782         }
1783
1784         return ret;
1785 }
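
     /*
      * Hedged usage sketch, not driver code: the ablkcipher side seen
      * from a caller; key, scatterlists, iv and callback (key, src_sg,
      * dst_sg, iv, done_cb) are assumed set up elsewhere.
      *
      *   struct crypto_ablkcipher *tfm;
      *   struct ablkcipher_request *req;
      *   int err;
      *
      *   tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
      *   if (IS_ERR(tfm))
      *           return PTR_ERR(tfm);
      *   crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
      *   req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
      *   ablkcipher_request_set_callback(req, 0, done_cb, NULL);
      *   ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
      *   err = crypto_ablkcipher_encrypt(req);  (-EINPROGRESS once queued)
      */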
1786
1787 #define template_aead           template_u.aead
1788 #define template_ablkcipher     template_u.ablkcipher
1789 struct caam_alg_template {
1790         char name[CRYPTO_MAX_ALG_NAME];
1791         char driver_name[CRYPTO_MAX_ALG_NAME];
1792         unsigned int blocksize;
1793         u32 type;
1794         union {
1795                 struct ablkcipher_alg ablkcipher;
1796                 struct aead_alg aead;
1797                 struct blkcipher_alg blkcipher;
1798                 struct cipher_alg cipher;
1799                 struct compress_alg compress;
1800                 struct rng_alg rng;
1801         } template_u;
1802         u32 class1_alg_type;
1803         u32 class2_alg_type;
1804         u32 alg_op;
1805 };
1806
1807 static struct caam_alg_template driver_algs[] = {
1808         /* single-pass ipsec_esp descriptor */
1809         {
1810                 .name = "authenc(hmac(md5),cbc(aes))",
1811                 .driver_name = "authenc-hmac-md5-cbc-aes-caam",
1812                 .blocksize = AES_BLOCK_SIZE,
1813                 .type = CRYPTO_ALG_TYPE_AEAD,
1814                 .template_aead = {
1815                         .setkey = aead_setkey,
1816                         .setauthsize = aead_setauthsize,
1817                         .encrypt = aead_encrypt,
1818                         .decrypt = aead_decrypt,
1819                         .givencrypt = aead_givencrypt,
1820                         .geniv = "<built-in>",
1821                         .ivsize = AES_BLOCK_SIZE,
1822                         .maxauthsize = MD5_DIGEST_SIZE,
1823                         },
1824                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1825                 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
1826                 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1827         },
1828         {
1829                 .name = "authenc(hmac(sha1),cbc(aes))",
1830                 .driver_name = "authenc-hmac-sha1-cbc-aes-caam",
1831                 .blocksize = AES_BLOCK_SIZE,
1832                 .type = CRYPTO_ALG_TYPE_AEAD,
1833                 .template_aead = {
1834                         .setkey = aead_setkey,
1835                         .setauthsize = aead_setauthsize,
1836                         .encrypt = aead_encrypt,
1837                         .decrypt = aead_decrypt,
1838                         .givencrypt = aead_givencrypt,
1839                         .geniv = "<built-in>",
1840                         .ivsize = AES_BLOCK_SIZE,
1841                         .maxauthsize = SHA1_DIGEST_SIZE,
1842                         },
1843                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1844                 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1845                 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1846         },
1847         {
1848                 .name = "authenc(hmac(sha224),cbc(aes))",
1849                 .driver_name = "authenc-hmac-sha224-cbc-aes-caam",
1850                 .blocksize = AES_BLOCK_SIZE,
                     .type = CRYPTO_ALG_TYPE_AEAD,
1851                 .template_aead = {
1852                         .setkey = aead_setkey,
1853                         .setauthsize = aead_setauthsize,
1854                         .encrypt = aead_encrypt,
1855                         .decrypt = aead_decrypt,
1856                         .givencrypt = aead_givencrypt,
1857                         .geniv = "<built-in>",
1858                         .ivsize = AES_BLOCK_SIZE,
1859                         .maxauthsize = SHA224_DIGEST_SIZE,
1860                         },
1861                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1862                 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1863                                    OP_ALG_AAI_HMAC_PRECOMP,
1864                 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1865         },
1866         {
1867                 .name = "authenc(hmac(sha256),cbc(aes))",
1868                 .driver_name = "authenc-hmac-sha256-cbc-aes-caam",
1869                 .blocksize = AES_BLOCK_SIZE,
1870                 .type = CRYPTO_ALG_TYPE_AEAD,
1871                 .template_aead = {
1872                         .setkey = aead_setkey,
1873                         .setauthsize = aead_setauthsize,
1874                         .encrypt = aead_encrypt,
1875                         .decrypt = aead_decrypt,
1876                         .givencrypt = aead_givencrypt,
1877                         .geniv = "<built-in>",
1878                         .ivsize = AES_BLOCK_SIZE,
1879                         .maxauthsize = SHA256_DIGEST_SIZE,
1880                         },
1881                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1882                 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1883                                    OP_ALG_AAI_HMAC_PRECOMP,
1884                 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1885         },
1886         {
1887                 .name = "authenc(hmac(sha384),cbc(aes))",
1888                 .driver_name = "authenc-hmac-sha384-cbc-aes-caam",
1889                 .blocksize = AES_BLOCK_SIZE,
                     .type = CRYPTO_ALG_TYPE_AEAD,
1890                 .template_aead = {
1891                         .setkey = aead_setkey,
1892                         .setauthsize = aead_setauthsize,
1893                         .encrypt = aead_encrypt,
1894                         .decrypt = aead_decrypt,
1895                         .givencrypt = aead_givencrypt,
1896                         .geniv = "<built-in>",
1897                         .ivsize = AES_BLOCK_SIZE,
1898                         .maxauthsize = SHA384_DIGEST_SIZE,
1899                         },
1900                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1901                 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1902                                    OP_ALG_AAI_HMAC_PRECOMP,
1903                 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1904         },
1905
1906         {
1907                 .name = "authenc(hmac(sha512),cbc(aes))",
1908                 .driver_name = "authenc-hmac-sha512-cbc-aes-caam",
1909                 .blocksize = AES_BLOCK_SIZE,
1910                 .type = CRYPTO_ALG_TYPE_AEAD,
1911                 .template_aead = {
1912                         .setkey = aead_setkey,
1913                         .setauthsize = aead_setauthsize,
1914                         .encrypt = aead_encrypt,
1915                         .decrypt = aead_decrypt,
1916                         .givencrypt = aead_givencrypt,
1917                         .geniv = "<built-in>",
1918                         .ivsize = AES_BLOCK_SIZE,
1919                         .maxauthsize = SHA512_DIGEST_SIZE,
1920                         },
1921                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1922                 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1923                                    OP_ALG_AAI_HMAC_PRECOMP,
1924                 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1925         },
1926         {
1927                 .name = "authenc(hmac(md5),cbc(des3_ede))",
1928                 .driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
1929                 .blocksize = DES3_EDE_BLOCK_SIZE,
1930                 .type = CRYPTO_ALG_TYPE_AEAD,
1931                 .template_aead = {
1932                         .setkey = aead_setkey,
1933                         .setauthsize = aead_setauthsize,
1934                         .encrypt = aead_encrypt,
1935                         .decrypt = aead_decrypt,
1936                         .givencrypt = aead_givencrypt,
1937                         .geniv = "<built-in>",
1938                         .ivsize = DES3_EDE_BLOCK_SIZE,
1939                         .maxauthsize = MD5_DIGEST_SIZE,
1940                         },
1941                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1942                 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
1943                 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1944         },
1945         {
1946                 .name = "authenc(hmac(sha1),cbc(des3_ede))",
1947                 .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
1948                 .blocksize = DES3_EDE_BLOCK_SIZE,
1949                 .type = CRYPTO_ALG_TYPE_AEAD,
1950                 .template_aead = {
1951                         .setkey = aead_setkey,
1952                         .setauthsize = aead_setauthsize,
1953                         .encrypt = aead_encrypt,
1954                         .decrypt = aead_decrypt,
1955                         .givencrypt = aead_givencrypt,
1956                         .geniv = "<built-in>",
1957                         .ivsize = DES3_EDE_BLOCK_SIZE,
1958                         .maxauthsize = SHA1_DIGEST_SIZE,
1959                         },
1960                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1961                 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1962                 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1963         },
1964         {
1965                 .name = "authenc(hmac(sha224),cbc(des3_ede))",
1966                 .driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
1967                 .blocksize = DES3_EDE_BLOCK_SIZE,
                     .type = CRYPTO_ALG_TYPE_AEAD,
1968                 .template_aead = {
1969                         .setkey = aead_setkey,
1970                         .setauthsize = aead_setauthsize,
1971                         .encrypt = aead_encrypt,
1972                         .decrypt = aead_decrypt,
1973                         .givencrypt = aead_givencrypt,
1974                         .geniv = "<built-in>",
1975                         .ivsize = DES3_EDE_BLOCK_SIZE,
1976                         .maxauthsize = SHA224_DIGEST_SIZE,
1977                         },
1978                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1979                 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1980                                    OP_ALG_AAI_HMAC_PRECOMP,
1981                 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1982         },
1983         {
1984                 .name = "authenc(hmac(sha256),cbc(des3_ede))",
1985                 .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
1986                 .blocksize = DES3_EDE_BLOCK_SIZE,
1987                 .type = CRYPTO_ALG_TYPE_AEAD,
1988                 .template_aead = {
1989                         .setkey = aead_setkey,
1990                         .setauthsize = aead_setauthsize,
1991                         .encrypt = aead_encrypt,
1992                         .decrypt = aead_decrypt,
1993                         .givencrypt = aead_givencrypt,
1994                         .geniv = "<built-in>",
1995                         .ivsize = DES3_EDE_BLOCK_SIZE,
1996                         .maxauthsize = SHA256_DIGEST_SIZE,
1997                         },
1998                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1999                 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2000                                    OP_ALG_AAI_HMAC_PRECOMP,
2001                 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
2002         },
2003         {
2004                 .name = "authenc(hmac(sha384),cbc(des3_ede))",
2005                 .driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
2006                 .blocksize = DES3_EDE_BLOCK_SIZE,
                     .type = CRYPTO_ALG_TYPE_AEAD,
2007                 .template_aead = {
2008                         .setkey = aead_setkey,
2009                         .setauthsize = aead_setauthsize,
2010                         .encrypt = aead_encrypt,
2011                         .decrypt = aead_decrypt,
2012                         .givencrypt = aead_givencrypt,
2013                         .geniv = "<built-in>",
2014                         .ivsize = DES3_EDE_BLOCK_SIZE,
2015                         .maxauthsize = SHA384_DIGEST_SIZE,
2016                         },
2017                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2018                 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2019                                    OP_ALG_AAI_HMAC_PRECOMP,
2020                 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
2021         },
2022         {
2023                 .name = "authenc(hmac(sha512),cbc(des3_ede))",
2024                 .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
2025                 .blocksize = DES3_EDE_BLOCK_SIZE,
2026                 .type = CRYPTO_ALG_TYPE_AEAD,
2027                 .template_aead = {
2028                         .setkey = aead_setkey,
2029                         .setauthsize = aead_setauthsize,
2030                         .encrypt = aead_encrypt,
2031                         .decrypt = aead_decrypt,
2032                         .givencrypt = aead_givencrypt,
2033                         .geniv = "<built-in>",
2034                         .ivsize = DES3_EDE_BLOCK_SIZE,
2035                         .maxauthsize = SHA512_DIGEST_SIZE,
2036                         },
2037                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2038                 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2039                                    OP_ALG_AAI_HMAC_PRECOMP,
2040                 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
2041         },
2042         {
2043                 .name = "authenc(hmac(md5),cbc(des))",
2044                 .driver_name = "authenc-hmac-md5-cbc-des-caam",
2045                 .blocksize = DES_BLOCK_SIZE,
2046                 .type = CRYPTO_ALG_TYPE_AEAD,
2047                 .template_aead = {
2048                         .setkey = aead_setkey,
2049                         .setauthsize = aead_setauthsize,
2050                         .encrypt = aead_encrypt,
2051                         .decrypt = aead_decrypt,
2052                         .givencrypt = aead_givencrypt,
2053                         .geniv = "<built-in>",
2054                         .ivsize = DES_BLOCK_SIZE,
2055                         .maxauthsize = MD5_DIGEST_SIZE,
2056                         },
2057                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2058                 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
2059                 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
2060         },
2061         {
2062                 .name = "authenc(hmac(sha1),cbc(des))",
2063                 .driver_name = "authenc-hmac-sha1-cbc-des-caam",
2064                 .blocksize = DES_BLOCK_SIZE,
2065                 .type = CRYPTO_ALG_TYPE_AEAD,
2066                 .template_aead = {
2067                         .setkey = aead_setkey,
2068                         .setauthsize = aead_setauthsize,
2069                         .encrypt = aead_encrypt,
2070                         .decrypt = aead_decrypt,
2071                         .givencrypt = aead_givencrypt,
2072                         .geniv = "<built-in>",
2073                         .ivsize = DES_BLOCK_SIZE,
2074                         .maxauthsize = SHA1_DIGEST_SIZE,
2075                         },
2076                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2077                 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
2078                 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
2079         },
2080         {
2081                 .name = "authenc(hmac(sha224),cbc(des))",
2082                 .driver_name = "authenc-hmac-sha224-cbc-des-caam",
2083                 .blocksize = DES_BLOCK_SIZE,
                     .type = CRYPTO_ALG_TYPE_AEAD,
2084                 .template_aead = {
2085                         .setkey = aead_setkey,
2086                         .setauthsize = aead_setauthsize,
2087                         .encrypt = aead_encrypt,
2088                         .decrypt = aead_decrypt,
2089                         .givencrypt = aead_givencrypt,
2090                         .geniv = "<built-in>",
2091                         .ivsize = DES_BLOCK_SIZE,
2092                         .maxauthsize = SHA224_DIGEST_SIZE,
2093                         },
2094                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2095                 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2096                                    OP_ALG_AAI_HMAC_PRECOMP,
2097                 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
2098         },
2099         {
2100                 .name = "authenc(hmac(sha256),cbc(des))",
2101                 .driver_name = "authenc-hmac-sha256-cbc-des-caam",
2102                 .blocksize = DES_BLOCK_SIZE,
2103                 .type = CRYPTO_ALG_TYPE_AEAD,
2104                 .template_aead = {
2105                         .setkey = aead_setkey,
2106                         .setauthsize = aead_setauthsize,
2107                         .encrypt = aead_encrypt,
2108                         .decrypt = aead_decrypt,
2109                         .givencrypt = aead_givencrypt,
2110                         .geniv = "<built-in>",
2111                         .ivsize = DES_BLOCK_SIZE,
2112                         .maxauthsize = SHA256_DIGEST_SIZE,
2113                         },
2114                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2115                 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2116                                    OP_ALG_AAI_HMAC_PRECOMP,
2117                 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
2118         },
2119         {
2120                 .name = "authenc(hmac(sha384),cbc(des))",
2121                 .driver_name = "authenc-hmac-sha384-cbc-des-caam",
2122                 .blocksize = DES_BLOCK_SIZE,
                     .type = CRYPTO_ALG_TYPE_AEAD,
2123                 .template_aead = {
2124                         .setkey = aead_setkey,
2125                         .setauthsize = aead_setauthsize,
2126                         .encrypt = aead_encrypt,
2127                         .decrypt = aead_decrypt,
2128                         .givencrypt = aead_givencrypt,
2129                         .geniv = "<built-in>",
2130                         .ivsize = DES_BLOCK_SIZE,
2131                         .maxauthsize = SHA384_DIGEST_SIZE,
2132                         },
2133                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2134                 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2135                                    OP_ALG_AAI_HMAC_PRECOMP,
2136                 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
2137         },
2138         {
2139                 .name = "authenc(hmac(sha512),cbc(des))",
2140                 .driver_name = "authenc-hmac-sha512-cbc-des-caam",
2141                 .blocksize = DES_BLOCK_SIZE,
2142                 .type = CRYPTO_ALG_TYPE_AEAD,
2143                 .template_aead = {
2144                         .setkey = aead_setkey,
2145                         .setauthsize = aead_setauthsize,
2146                         .encrypt = aead_encrypt,
2147                         .decrypt = aead_decrypt,
2148                         .givencrypt = aead_givencrypt,
2149                         .geniv = "<built-in>",
2150                         .ivsize = DES_BLOCK_SIZE,
2151                         .maxauthsize = SHA512_DIGEST_SIZE,
2152                         },
2153                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2154                 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2155                                    OP_ALG_AAI_HMAC_PRECOMP,
2156                 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
2157         },
2158         /* ablkcipher descriptor */
2159         {
2160                 .name = "cbc(aes)",
2161                 .driver_name = "cbc-aes-caam",
2162                 .blocksize = AES_BLOCK_SIZE,
2163                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2164                 .template_ablkcipher = {
2165                         .setkey = ablkcipher_setkey,
2166                         .encrypt = ablkcipher_encrypt,
2167                         .decrypt = ablkcipher_decrypt,
2168                         .geniv = "eseqiv",
2169                         .min_keysize = AES_MIN_KEY_SIZE,
2170                         .max_keysize = AES_MAX_KEY_SIZE,
2171                         .ivsize = AES_BLOCK_SIZE,
2172                         },
2173                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2174         },
2175         {
2176                 .name = "cbc(des3_ede)",
2177                 .driver_name = "cbc-3des-caam",
2178                 .blocksize = DES3_EDE_BLOCK_SIZE,
2179                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2180                 .template_ablkcipher = {
2181                         .setkey = ablkcipher_setkey,
2182                         .encrypt = ablkcipher_encrypt,
2183                         .decrypt = ablkcipher_decrypt,
2184                         .geniv = "eseqiv",
2185                         .min_keysize = DES3_EDE_KEY_SIZE,
2186                         .max_keysize = DES3_EDE_KEY_SIZE,
2187                         .ivsize = DES3_EDE_BLOCK_SIZE,
2188                         },
2189                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2190         },
2191         {
2192                 .name = "cbc(des)",
2193                 .driver_name = "cbc-des-caam",
2194                 .blocksize = DES_BLOCK_SIZE,
2195                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2196                 .template_ablkcipher = {
2197                         .setkey = ablkcipher_setkey,
2198                         .encrypt = ablkcipher_encrypt,
2199                         .decrypt = ablkcipher_decrypt,
2200                         .geniv = "eseqiv",
2201                         .min_keysize = DES_KEY_SIZE,
2202                         .max_keysize = DES_KEY_SIZE,
2203                         .ivsize = DES_BLOCK_SIZE,
2204                         },
2205                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2206         }
2207 };
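
     /*
      * Reading one entry above: "authenc(hmac(sha1),cbc(aes))" selects
      * class 1 = AES-CBC and class 2 = SHA1 HMAC with a precomputed
      * (split) key, while alg_op names the plain HMAC operation used
      * when that split key is derived at setkey time; caam_cra_init()
      * below copies all three values into the per-tfm context.
      */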
2208
2209 struct caam_crypto_alg {
2210         struct list_head entry;
2211         struct device *ctrldev;
2212         int class1_alg_type;
2213         int class2_alg_type;
2214         int alg_op;
2215         struct crypto_alg crypto_alg;
2216 };
2217
2218 static int caam_cra_init(struct crypto_tfm *tfm)
2219 {
2220         struct crypto_alg *alg = tfm->__crt_alg;
2221         struct caam_crypto_alg *caam_alg =
2222                  container_of(alg, struct caam_crypto_alg, crypto_alg);
2223         struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2224         struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev);
2225         int tgt_jr = atomic_inc_return(&priv->tfm_count);
2226
2227         /*
2228          * distribute tfms across job rings to ensure in-order
2229          * crypto request processing per tfm
2230          */
2231         ctx->jrdev = priv->algapi_jr[(tgt_jr / 2) % priv->num_jrs_for_algapi];
2232
2233         /* copy descriptor header template value */
2234         ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
2235         ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
2236         ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;
2237
2238         return 0;
2239 }
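
     /*
      * Worked example of the mapping above: with two job rings,
      * tfm_count values 0, 1, 2, 3, 4 pass through (tgt_jr / 2) % 2 to
      * give rings 0, 0, 1, 1, 0; consecutive tfms are paired on a ring,
      * and each tfm stays on its ring (hence in order) for its lifetime.
      */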
2240
2241 static void caam_cra_exit(struct crypto_tfm *tfm)
2242 {
2243         struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2244
2245         if (ctx->sh_desc_enc_dma &&
2246             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
2247                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
2248                                  desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
2249         if (ctx->sh_desc_dec_dma &&
2250             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
2251                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
2252                                  desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
2253         if (ctx->sh_desc_givenc_dma &&
2254             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
2255                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
2256                                  desc_bytes(ctx->sh_desc_givenc),
2257                                  DMA_TO_DEVICE);
2258 }
2259
2260 static void __exit caam_algapi_exit(void)
2261 {
2263         struct device_node *dev_node;
2264         struct platform_device *pdev;
2265         struct device *ctrldev;
2266         struct caam_drv_private *priv;
2267         struct caam_crypto_alg *t_alg, *n;
2268         int i, err;
2269
2270         dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
2271         if (!dev_node) {
2272                 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
2273                 if (!dev_node)
2274                         return;
2275         }
2276
2277         pdev = of_find_device_by_node(dev_node);
2278         if (!pdev) {
                     of_node_put(dev_node);
2279                 return;
             }
2280
2281         ctrldev = &pdev->dev;
2282         of_node_put(dev_node);
2283         priv = dev_get_drvdata(ctrldev);
2284
2285         if (!priv->alg_list.next)
2286                 return;
2287
2288         list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
2289                 crypto_unregister_alg(&t_alg->crypto_alg);
2290                 list_del(&t_alg->entry);
2291                 kfree(t_alg);
2292         }
2293
2294         for (i = 0; i < priv->total_jobrs; i++) {
2295                 err = caam_jr_deregister(priv->algapi_jr[i]);
2296                 if (err < 0)
2297                         break;
2298         }
2299         kfree(priv->algapi_jr);
2300 }
2301
2302 static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
2303                                               struct caam_alg_template
2304                                               *template)
2305 {
2306         struct caam_crypto_alg *t_alg;
2307         struct crypto_alg *alg;
2308
2309         t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
2310         if (!t_alg) {
2311                 dev_err(ctrldev, "failed to allocate t_alg\n");
2312                 return ERR_PTR(-ENOMEM);
2313         }
2314
2315         alg = &t_alg->crypto_alg;
2316
2317         snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
2318         snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2319                  template->driver_name);
2320         alg->cra_module = THIS_MODULE;
2321         alg->cra_init = caam_cra_init;
2322         alg->cra_exit = caam_cra_exit;
2323         alg->cra_priority = CAAM_CRA_PRIORITY;
2324         alg->cra_blocksize = template->blocksize;
2325         alg->cra_alignmask = 0;
2326         alg->cra_ctxsize = sizeof(struct caam_ctx);
2327         alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
2328                          template->type;
2329         switch (template->type) {
2330         case CRYPTO_ALG_TYPE_ABLKCIPHER:
2331                 alg->cra_type = &crypto_ablkcipher_type;
2332                 alg->cra_ablkcipher = template->template_ablkcipher;
2333                 break;
2334         case CRYPTO_ALG_TYPE_AEAD:
2335                 alg->cra_type = &crypto_aead_type;
2336                 alg->cra_aead = template->template_aead;
2337                 break;
2338         }
2339
2340         t_alg->class1_alg_type = template->class1_alg_type;
2341         t_alg->class2_alg_type = template->class2_alg_type;
2342         t_alg->alg_op = template->alg_op;
2343         t_alg->ctrldev = ctrldev;
2344
2345         return t_alg;
2346 }
2347
2348 static int __init caam_algapi_init(void)
2349 {
2350         struct device_node *dev_node;
2351         struct platform_device *pdev;
2352         struct device *ctrldev, **jrdev;
2353         struct caam_drv_private *priv;
2354         int i = 0, err = 0;
2355
2356         dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
2357         if (!dev_node) {
2358                 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
2359                 if (!dev_node)
2360                         return -ENODEV;
2361         }
2362
2363         pdev = of_find_device_by_node(dev_node);
2364         if (!pdev) {
                     of_node_put(dev_node);
2365                 return -ENODEV;
             }
2366
2367         ctrldev = &pdev->dev;
2368         priv = dev_get_drvdata(ctrldev);
2370
2371         INIT_LIST_HEAD(&priv->alg_list);
2372
2373         jrdev = kmalloc(sizeof(*jrdev) * priv->total_jobrs, GFP_KERNEL);
2374         if (!jrdev) {
                     of_node_put(dev_node);
2375                 return -ENOMEM;
             }
2376
2377         for (i = 0; i < priv->total_jobrs; i++) {
2378                 err = caam_jr_register(ctrldev, &jrdev[i]);
2379                 if (err < 0)
2380                         break;
2381         }
2382         if (err < 0 && i == 0) {
2383                 dev_err(ctrldev, "algapi error in job ring registration: %d\n",
2384                         err);
2385                 kfree(jrdev);
                     of_node_put(dev_node);
2386                 return err;
2387         }
2388
2389         priv->num_jrs_for_algapi = i;
2390         priv->algapi_jr = jrdev;
2391         atomic_set(&priv->tfm_count, -1);
2392
2393         /* register crypto algorithms the device supports */
2394         for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2395                 /* TODO: check if h/w supports alg */
2396                 struct caam_crypto_alg *t_alg;
2397
2398                 t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
2399                 if (IS_ERR(t_alg)) {
2400                         err = PTR_ERR(t_alg);
2401                         dev_warn(ctrldev, "%s alg allocation failed\n",
2402                                  driver_algs[i].driver_name);
2403                         continue;
2404                 }
2405
2406                 err = crypto_register_alg(&t_alg->crypto_alg);
2407                 if (err) {
2408                         dev_warn(ctrldev, "%s alg registration failed\n",
2409                                  t_alg->crypto_alg.cra_driver_name);
2410                         kfree(t_alg);
2411                 } else {
2412                         list_add_tail(&t_alg->entry, &priv->alg_list);
                     }
2413         }
2414         if (!list_empty(&priv->alg_list))
2415                 dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n",
2416                          (char *)of_get_property(dev_node, "compatible", NULL));
             of_node_put(dev_node);
2417
2418         return err;
2419 }
2420
2421 module_init(caam_algapi_init);
2422 module_exit(caam_algapi_exit);
2423
2424 MODULE_LICENSE("GPL");
2425 MODULE_DESCRIPTION("FSL CAAM support for crypto API");
2426 MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");