// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |    |-------->| (operation) |
 *       .              |    |         | (load ctx2) |
 *       .              |    |         ---------------
 * ---------------      |    |
 * | JobDesc #3  |------|    |
 * | *(packet 3) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #4  |------------
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
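
/*
 * Illustrative only, not part of this driver: a kernel user reaches the
 * entry points below through the generic ahash API. For a one-shot
 * digest (my_done_cb and my_ctx are placeholder names):
 *
 *      struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *      struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *      ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *                                 my_done_cb, my_ctx);
 *      ahash_request_set_crypt(req, src_sg, result_buf, nbytes);
 *      err = crypto_ahash_digest(req);    (-EINPROGRESS once queued)
 *
 * crypto_ahash_digest() lands in ahash_digest() below, which wraps a
 * job descriptor around the "digest" shared descriptor and enqueues it
 * on a job ring.
 */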

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamhash_desc.h"

#define CAAM_CRA_PRIORITY               3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE          (SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE        SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE       SHA512_DIGEST_SIZE

#define DESC_HASH_MAX_USED_BYTES        (DESC_AHASH_FINAL_LEN + \
                                         CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN          (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 bytes of message length */
#define HASH_MSG_LEN                    8
#define MAX_CTX_LEN                     (HASH_MSG_LEN + SHA512_DIGEST_SIZE)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
        u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
        u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
        u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
        u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
        dma_addr_t sh_desc_update_dma ____cacheline_aligned;
        dma_addr_t sh_desc_update_first_dma;
        dma_addr_t sh_desc_fin_dma;
        dma_addr_t sh_desc_digest_dma;
        enum dma_data_direction dir;
        struct device *jrdev;
        u8 key[CAAM_MAX_HASH_KEY_SIZE];
        int ctx_len;
        struct alginfo adata;
};

/* ahash state */
struct caam_hash_state {
        dma_addr_t buf_dma;
        dma_addr_t ctx_dma;
        u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
        int buflen_0;
        u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
        int buflen_1;
        u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
        int (*update)(struct ahash_request *req);
        int (*final)(struct ahash_request *req);
        int (*finup)(struct ahash_request *req);
        int current_buf;
};

struct caam_export_state {
        u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
        u8 caam_ctx[MAX_CTX_LEN];
        int buflen;
        int (*update)(struct ahash_request *req);
        int (*final)(struct ahash_request *req);
        int (*finup)(struct ahash_request *req);
};

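/*
 * The request state keeps two bounce buffers, buf_0 and buf_1, used in
 * ping-pong fashion: one holds the partial block fed into the current
 * job, while leftover bytes for the next job are staged in the other.
 * current_buf selects between them, and switch_buf() flips the roles
 * once a job that consumed the current buffer has completed.
 */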
static inline void switch_buf(struct caam_hash_state *state)
{
        state->current_buf ^= 1;
}

static inline u8 *current_buf(struct caam_hash_state *state)
{
        return state->current_buf ? state->buf_1 : state->buf_0;
}

static inline u8 *alt_buf(struct caam_hash_state *state)
{
        return state->current_buf ? state->buf_0 : state->buf_1;
}

static inline int *current_buflen(struct caam_hash_state *state)
{
        return state->current_buf ? &state->buflen_1 : &state->buflen_0;
}

static inline int *alt_buflen(struct caam_hash_state *state)
{
        return state->current_buf ? &state->buflen_0 : &state->buflen_1;
}

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
                                      struct caam_hash_state *state,
                                      int ctx_len)
{
        state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
                                        ctx_len, DMA_FROM_DEVICE);
        if (dma_mapping_error(jrdev, state->ctx_dma)) {
                dev_err(jrdev, "unable to map ctx\n");
                state->ctx_dma = 0;
                return -ENOMEM;
        }

        append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

        return 0;
}

/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
                                                u8 *result, int digestsize)
{
        dma_addr_t dst_dma;

        dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
        append_seq_out_ptr(desc, dst_dma, digestsize, 0);

        return dst_dma;
}

/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
                                     struct sec4_sg_entry *sec4_sg,
                                     struct caam_hash_state *state)
{
        int buflen = *current_buflen(state);

        if (!buflen)
                return 0;

        state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
                                        DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, state->buf_dma)) {
                dev_err(jrdev, "unable to map buf\n");
                state->buf_dma = 0;
                return -ENOMEM;
        }

        dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);

        return 0;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(struct device *jrdev,
                                     struct caam_hash_state *state, int ctx_len,
                                     struct sec4_sg_entry *sec4_sg, u32 flag)
{
        state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
        if (dma_mapping_error(jrdev, state->ctx_dma)) {
                dev_err(jrdev, "unable to map ctx\n");
                state->ctx_dma = 0;
                return -ENOMEM;
        }

        dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

        return 0;
}

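/*
 * (Re)build the four shared descriptors, one per hardware operation
 * state: update (OP_ALG_AS_UPDATE), first update (OP_ALG_AS_INIT),
 * final (OP_ALG_AS_FINALIZE) and one-shot digest (OP_ALG_AS_INITFINAL),
 * then sync each one out to the device.
 */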
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        int digestsize = crypto_ahash_digestsize(ahash);
        struct device *jrdev = ctx->jrdev;
        struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
        u32 *desc;

        ctx->adata.key_virt = ctx->key;

        /* ahash_update shared descriptor */
        desc = ctx->sh_desc_update;
        cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
                          ctx->ctx_len, true, ctrlpriv->era);
        dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
                                   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
        print_hex_dump(KERN_ERR,
                       "ahash update shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

        /* ahash_update_first shared descriptor */
        desc = ctx->sh_desc_update_first;
        cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
                          ctx->ctx_len, false, ctrlpriv->era);
        dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
                                   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
        print_hex_dump(KERN_ERR,
                       "ahash update first shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

        /* ahash_final shared descriptor */
        desc = ctx->sh_desc_fin;
        cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
                          ctx->ctx_len, true, ctrlpriv->era);
        dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
                                   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        /* ahash_digest shared descriptor */
        desc = ctx->sh_desc_digest;
        cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
                          ctx->ctx_len, false, ctrlpriv->era);
        dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
                                   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
        print_hex_dump(KERN_ERR,
                       "ahash digest shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        return 0;
}

/* Digest the key if it is too large, i.e. longer than the block size */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
                           u32 *keylen, u8 *key_out, u32 digestsize)
{
        struct device *jrdev = ctx->jrdev;
        u32 *desc;
        struct split_key_result result;
        dma_addr_t src_dma, dst_dma;
        int ret;

        desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
        if (!desc) {
                dev_err(jrdev, "unable to allocate key input memory\n");
                return -ENOMEM;
        }

        init_job_desc(desc, 0);

        src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
                                 DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, src_dma)) {
                dev_err(jrdev, "unable to map key input memory\n");
                kfree(desc);
                return -ENOMEM;
        }
        dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
                                 DMA_FROM_DEVICE);
        if (dma_mapping_error(jrdev, dst_dma)) {
                dev_err(jrdev, "unable to map key output memory\n");
                dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
                kfree(desc);
                return -ENOMEM;
        }

        /* Job descriptor to perform unkeyed hash on key_in */
        append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
                         OP_ALG_AS_INITFINAL);
        append_seq_in_ptr(desc, src_dma, *keylen, 0);
        append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
                             FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
        append_seq_out_ptr(desc, dst_dma, digestsize, 0);
        append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
                         LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
        print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

        result.err = 0;
        init_completion(&result.completion);

        ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
        if (!ret) {
                /* in progress */
                wait_for_completion(&result.completion);
                ret = result.err;
#ifdef DEBUG
                print_hex_dump(KERN_ERR,
                               "digested key@"__stringify(__LINE__)": ",
                               DUMP_PREFIX_ADDRESS, 16, 4, key_in,
                               digestsize, 1);
#endif
        }
        dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
        dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

        *keylen = digestsize;

        kfree(desc);

        return ret;
}

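/*
 * Per the usual HMAC rule, a key longer than the block size is first
 * digested down to digestsize bytes. On CAAM era >= 6 the raw key is
 * kept inline and the split key is derived by the shared descriptor
 * (DKP); on older parts the split key is generated here via
 * gen_split_key().
 */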
static int ahash_setkey(struct crypto_ahash *ahash,
                        const u8 *key, unsigned int keylen)
{
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
        int digestsize = crypto_ahash_digestsize(ahash);
        struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
        int ret;
        u8 *hashed_key = NULL;

#ifdef DEBUG
        printk(KERN_ERR "keylen %d\n", keylen);
#endif

        if (keylen > blocksize) {
                hashed_key = kmalloc_array(digestsize,
                                           sizeof(*hashed_key),
                                           GFP_KERNEL | GFP_DMA);
                if (!hashed_key)
                        return -ENOMEM;
                ret = hash_digest_key(ctx, key, &keylen, hashed_key,
                                      digestsize);
                if (ret)
                        goto bad_free_key;
                key = hashed_key;
        }

        /*
         * If DKP is supported, use it in the shared descriptor to generate
         * the split key.
         */
        if (ctrlpriv->era >= 6) {
                ctx->adata.key_inline = true;
                ctx->adata.keylen = keylen;
                ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
                                                      OP_ALG_ALGSEL_MASK);

                if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
                        goto bad_free_key;

                memcpy(ctx->key, key, keylen);
        } else {
                ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
                                    keylen, CAAM_MAX_HASH_KEY_SIZE);
                if (ret)
                        goto bad_free_key;
        }

        kfree(hashed_key);
        return ahash_set_sh_desc(ahash);
 bad_free_key:
        kfree(hashed_key);
        crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
}

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
        dma_addr_t dst_dma;
        dma_addr_t sec4_sg_dma;
        int src_nents;
        int sec4_sg_bytes;
        u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
        struct sec4_sg_entry sec4_sg[0];
};

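/*
 * Undo the DMA mappings created while building a job: the source
 * scatterlist, the result buffer, the S/G table and any buffered data.
 */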
static inline void ahash_unmap(struct device *dev,
                        struct ahash_edesc *edesc,
                        struct ahash_request *req, int dst_len)
{
        struct caam_hash_state *state = ahash_request_ctx(req);

        if (edesc->src_nents)
                dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
        if (edesc->dst_dma)
                dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

        if (edesc->sec4_sg_bytes)
                dma_unmap_single(dev, edesc->sec4_sg_dma,
                                 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

        if (state->buf_dma) {
                dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
                                 DMA_TO_DEVICE);
                state->buf_dma = 0;
        }
}

static inline void ahash_unmap_ctx(struct device *dev,
                        struct ahash_edesc *edesc,
                        struct ahash_request *req, int dst_len, u32 flag)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);

        if (state->ctx_dma) {
                dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
                state->ctx_dma = 0;
        }
        ahash_unmap(dev, edesc, req, dst_len);
}

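/*
 * Job completion callbacks, one per job flavour. They differ only in
 * which mappings are released: ahash_done() handles jobs that wrote
 * req->result with no running context, ahash_done_bi() jobs that read
 * and updated the context, ahash_done_ctx_src() jobs that read the
 * context and wrote req->result, and ahash_done_ctx_dst() jobs that
 * only wrote a new context.
 */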
static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
                       void *context)
{
        struct ahash_request *req = context;
        struct ahash_edesc *edesc;
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);

        dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

        edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
        if (err)
                caam_jr_strstatus(jrdev, err);

        ahash_unmap(jrdev, edesc, req, digestsize);
        kfree(edesc);

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
                       ctx->ctx_len, 1);
        if (req->result)
                print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
                               DUMP_PREFIX_ADDRESS, 16, 4, req->result,
                               digestsize, 1);
#endif

        req->base.complete(&req->base, err);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
                            void *context)
{
        struct ahash_request *req = context;
        struct ahash_edesc *edesc;
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
        int digestsize = crypto_ahash_digestsize(ahash);

        dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

        edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
        if (err)
                caam_jr_strstatus(jrdev, err);

        ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
        switch_buf(state);
        kfree(edesc);

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
                       ctx->ctx_len, 1);
        if (req->result)
                print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
                               DUMP_PREFIX_ADDRESS, 16, 4, req->result,
                               digestsize, 1);
#endif

        req->base.complete(&req->base, err);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
                               void *context)
{
        struct ahash_request *req = context;
        struct ahash_edesc *edesc;
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);

        dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

        edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
        if (err)
                caam_jr_strstatus(jrdev, err);

        ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
        kfree(edesc);

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
                       ctx->ctx_len, 1);
        if (req->result)
                print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
                               DUMP_PREFIX_ADDRESS, 16, 4, req->result,
                               digestsize, 1);
#endif

        req->base.complete(&req->base, err);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
                               void *context)
{
        struct ahash_request *req = context;
        struct ahash_edesc *edesc;
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
        int digestsize = crypto_ahash_digestsize(ahash);

        dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

        edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
        if (err)
                caam_jr_strstatus(jrdev, err);

        ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
        switch_buf(state);
        kfree(edesc);

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
                       ctx->ctx_len, 1);
        if (req->result)
                print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
                               DUMP_PREFIX_ADDRESS, 16, 4, req->result,
                               digestsize, 1);
#endif

        req->base.complete(&req->base, err);
}

/*
 * Allocate an extended descriptor, which contains the hardware descriptor
 * and space for a hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
                                             int sg_num, u32 *sh_desc,
                                             dma_addr_t sh_desc_dma,
                                             gfp_t flags)
{
        struct ahash_edesc *edesc;
        unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

        edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
        if (!edesc) {
                dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
                return NULL;
        }

        init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
                             HDR_SHARE_DEFER | HDR_REVERSE);

        return edesc;
}

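/*
 * Append the SEQ IN PTR command for the job's input. A single mapped
 * segment with no prepended entries is referenced directly; otherwise
 * the input goes through the sec4 S/G table (LDST_SGF), with the first
 * first_sg slots left for entries the caller has already filled in
 * (running context and/or buffered data).
 */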
static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
                               struct ahash_edesc *edesc,
                               struct ahash_request *req, int nents,
                               unsigned int first_sg,
                               unsigned int first_bytes, size_t to_hash)
{
        dma_addr_t src_dma;
        u32 options;

        if (nents > 1 || first_sg) {
                struct sec4_sg_entry *sg = edesc->sec4_sg;
                unsigned int sgsize = sizeof(*sg) * (first_sg + nents);

                sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);

                src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
                if (dma_mapping_error(ctx->jrdev, src_dma)) {
                        dev_err(ctx->jrdev, "unable to map S/G table\n");
                        return -ENOMEM;
                }

                edesc->sec4_sg_bytes = sgsize;
                edesc->sec4_sg_dma = src_dma;
                options = LDST_SGF;
        } else {
                src_dma = sg_dma_address(req->src);
                options = 0;
        }

        append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
                          options);

        return 0;
}

/*
 * Submit an update job descriptor. Only whole blocks are sent to the
 * hardware; the remainder (in_len % block size) is carried over in the
 * alternate buffer for a later job.
 */
static int ahash_update_ctx(struct ahash_request *req)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC;
        u8 *buf = current_buf(state);
        int *buflen = current_buflen(state);
        u8 *next_buf = alt_buf(state);
        int *next_buflen = alt_buflen(state), last_buflen;
        int in_len = *buflen + req->nbytes, to_hash;
        u32 *desc;
        int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
        struct ahash_edesc *edesc;
        int ret = 0;

        last_buflen = *next_buflen;
        *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
        to_hash = in_len - *next_buflen;

        if (to_hash) {
                src_nents = sg_nents_for_len(req->src,
                                             req->nbytes - (*next_buflen));
                if (src_nents < 0) {
                        dev_err(jrdev, "Invalid number of src SG.\n");
                        return src_nents;
                }

                if (src_nents) {
                        mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
                                                  DMA_TO_DEVICE);
                        if (!mapped_nents) {
                                dev_err(jrdev, "unable to DMA map source\n");
                                return -ENOMEM;
                        }
                } else {
                        mapped_nents = 0;
                }

                sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
                sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
                                 sizeof(struct sec4_sg_entry);

                /*
                 * allocate space for base edesc and hw desc commands,
                 * link tables
                 */
                edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
                                          ctx->sh_desc_update,
                                          ctx->sh_desc_update_dma, flags);
                if (!edesc) {
                        dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
                        return -ENOMEM;
                }

                edesc->src_nents = src_nents;
                edesc->sec4_sg_bytes = sec4_sg_bytes;

                ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
                                         edesc->sec4_sg, DMA_BIDIRECTIONAL);
                if (ret)
                        goto unmap_ctx;

                ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
                if (ret)
                        goto unmap_ctx;

                if (mapped_nents) {
                        sg_to_sec4_sg_last(req->src, mapped_nents,
                                           edesc->sec4_sg + sec4_sg_src_index,
                                           0);
                        if (*next_buflen)
                                scatterwalk_map_and_copy(next_buf, req->src,
                                                         to_hash - *buflen,
                                                         *next_buflen, 0);
                } else {
                        sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
                                            1);
                }

                desc = edesc->hw_desc;

                edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                                     sec4_sg_bytes,
                                                     DMA_TO_DEVICE);
                if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
                        dev_err(jrdev, "unable to map S/G table\n");
                        ret = -ENOMEM;
                        goto unmap_ctx;
                }

                append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
                                       to_hash, LDST_SGF);

                append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
                print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
                               DUMP_PREFIX_ADDRESS, 16, 4, desc,
                               desc_bytes(desc), 1);
#endif

                ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
                if (ret)
                        goto unmap_ctx;

                ret = -EINPROGRESS;
        } else if (*next_buflen) {
                scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
                                         req->nbytes, 0);
                *buflen = *next_buflen;
                *next_buflen = last_buflen;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
        print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
                       *next_buflen, 1);
#endif

        return ret;
 unmap_ctx:
        ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
        kfree(edesc);
        return ret;
}

static int ahash_final_ctx(struct ahash_request *req)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC;
        int buflen = *current_buflen(state);
        u32 *desc;
        int sec4_sg_bytes, sec4_sg_src_index;
        int digestsize = crypto_ahash_digestsize(ahash);
        struct ahash_edesc *edesc;
        int ret;

        sec4_sg_src_index = 1 + (buflen ? 1 : 0);
        sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

        /* allocate space for base edesc and hw desc commands, link tables */
        edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
                                  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
                                  flags);
        if (!edesc)
                return -ENOMEM;

        desc = edesc->hw_desc;

        edesc->sec4_sg_bytes = sec4_sg_bytes;

        ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
                                 edesc->sec4_sg, DMA_TO_DEVICE);
        if (ret)
                goto unmap_ctx;

        ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
        if (ret)
                goto unmap_ctx;

        sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);

        edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                            sec4_sg_bytes, DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
                dev_err(jrdev, "unable to map S/G table\n");
                ret = -ENOMEM;
                goto unmap_ctx;
        }

        append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
                          LDST_SGF);

        edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
                                                digestsize);
        if (dma_mapping_error(jrdev, edesc->dst_dma)) {
                dev_err(jrdev, "unable to map dst\n");
                ret = -ENOMEM;
                goto unmap_ctx;
        }

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

        ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
        if (ret)
                goto unmap_ctx;

        return -EINPROGRESS;
 unmap_ctx:
        ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
        kfree(edesc);
        return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC;
        int buflen = *current_buflen(state);
        u32 *desc;
        int sec4_sg_src_index;
        int src_nents, mapped_nents;
        int digestsize = crypto_ahash_digestsize(ahash);
        struct ahash_edesc *edesc;
        int ret;

        src_nents = sg_nents_for_len(req->src, req->nbytes);
        if (src_nents < 0) {
                dev_err(jrdev, "Invalid number of src SG.\n");
                return src_nents;
        }

        if (src_nents) {
                mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
                                          DMA_TO_DEVICE);
                if (!mapped_nents) {
                        dev_err(jrdev, "unable to DMA map source\n");
                        return -ENOMEM;
                }
        } else {
                mapped_nents = 0;
        }

        sec4_sg_src_index = 1 + (buflen ? 1 : 0);

        /* allocate space for base edesc and hw desc commands, link tables */
        edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
                                  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
                                  flags);
        if (!edesc) {
                dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
                return -ENOMEM;
        }

        desc = edesc->hw_desc;

        edesc->src_nents = src_nents;

        ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
                                 edesc->sec4_sg, DMA_TO_DEVICE);
        if (ret)
                goto unmap_ctx;

        ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
        if (ret)
                goto unmap_ctx;

        ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
                                  sec4_sg_src_index, ctx->ctx_len + buflen,
                                  req->nbytes);
        if (ret)
                goto unmap_ctx;

        edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
                                                digestsize);
        if (dma_mapping_error(jrdev, edesc->dst_dma)) {
                dev_err(jrdev, "unable to map dst\n");
                ret = -ENOMEM;
                goto unmap_ctx;
        }

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

        ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
        if (ret)
                goto unmap_ctx;

        return -EINPROGRESS;
 unmap_ctx:
        ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
        kfree(edesc);
        return ret;
}

static int ahash_digest(struct ahash_request *req)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC;
        u32 *desc;
        int digestsize = crypto_ahash_digestsize(ahash);
        int src_nents, mapped_nents;
        struct ahash_edesc *edesc;
        int ret;

        state->buf_dma = 0;

        src_nents = sg_nents_for_len(req->src, req->nbytes);
        if (src_nents < 0) {
                dev_err(jrdev, "Invalid number of src SG.\n");
                return src_nents;
        }

        if (src_nents) {
                mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
                                          DMA_TO_DEVICE);
                if (!mapped_nents) {
                        dev_err(jrdev, "unable to map source for DMA\n");
                        return -ENOMEM;
                }
        } else {
                mapped_nents = 0;
        }

        /* allocate space for base edesc and hw desc commands, link tables */
        edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
                                  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
                                  flags);
        if (!edesc) {
                dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
                return -ENOMEM;
        }

        edesc->src_nents = src_nents;

        ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
                                  req->nbytes);
        if (ret) {
                ahash_unmap(jrdev, edesc, req, digestsize);
                kfree(edesc);
                return ret;
        }

        desc = edesc->hw_desc;

        edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
                                                digestsize);
        if (dma_mapping_error(jrdev, edesc->dst_dma)) {
                dev_err(jrdev, "unable to map dst\n");
                ahash_unmap(jrdev, edesc, req, digestsize);
                kfree(edesc);
                return -ENOMEM;
        }

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

        ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
        if (!ret) {
                ret = -EINPROGRESS;
        } else {
                ahash_unmap(jrdev, edesc, req, digestsize);
                kfree(edesc);
        }

        return ret;
}

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC;
        u8 *buf = current_buf(state);
        int buflen = *current_buflen(state);
        u32 *desc;
        int digestsize = crypto_ahash_digestsize(ahash);
        struct ahash_edesc *edesc;
        int ret;

        /* allocate space for base edesc and hw desc commands, link tables */
        edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
                                  ctx->sh_desc_digest_dma, flags);
        if (!edesc)
                return -ENOMEM;

        desc = edesc->hw_desc;

        state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, state->buf_dma)) {
                dev_err(jrdev, "unable to map src\n");
                goto unmap;
        }

        append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

        edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
                                                digestsize);
        if (dma_mapping_error(jrdev, edesc->dst_dma)) {
                dev_err(jrdev, "unable to map dst\n");
                goto unmap;
        }

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

        ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
        if (!ret) {
                ret = -EINPROGRESS;
        } else {
                ahash_unmap(jrdev, edesc, req, digestsize);
                kfree(edesc);
        }

        return ret;
 unmap:
        ahash_unmap(jrdev, edesc, req, digestsize);
        kfree(edesc);
        return -ENOMEM;

}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC;
        u8 *buf = current_buf(state);
        int *buflen = current_buflen(state);
        u8 *next_buf = alt_buf(state);
        int *next_buflen = alt_buflen(state);
        int in_len = *buflen + req->nbytes, to_hash;
        int sec4_sg_bytes, src_nents, mapped_nents;
        struct ahash_edesc *edesc;
        u32 *desc;
        int ret = 0;

        *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
        to_hash = in_len - *next_buflen;

        if (to_hash) {
                src_nents = sg_nents_for_len(req->src,
                                             req->nbytes - *next_buflen);
                if (src_nents < 0) {
                        dev_err(jrdev, "Invalid number of src SG.\n");
                        return src_nents;
                }

                if (src_nents) {
                        mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
                                                  DMA_TO_DEVICE);
                        if (!mapped_nents) {
                                dev_err(jrdev, "unable to DMA map source\n");
                                return -ENOMEM;
                        }
                } else {
                        mapped_nents = 0;
                }

                sec4_sg_bytes = (1 + mapped_nents) *
                                sizeof(struct sec4_sg_entry);

                /*
                 * allocate space for base edesc and hw desc commands,
                 * link tables
                 */
                edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
                                          ctx->sh_desc_update_first,
                                          ctx->sh_desc_update_first_dma,
                                          flags);
                if (!edesc) {
                        dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
                        return -ENOMEM;
                }

                edesc->src_nents = src_nents;
                edesc->sec4_sg_bytes = sec4_sg_bytes;

                ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
                if (ret)
                        goto unmap_ctx;

                sg_to_sec4_sg_last(req->src, mapped_nents,
                                   edesc->sec4_sg + 1, 0);

                if (*next_buflen) {
                        scatterwalk_map_and_copy(next_buf, req->src,
                                                 to_hash - *buflen,
                                                 *next_buflen, 0);
                }

                desc = edesc->hw_desc;

                edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                                    sec4_sg_bytes,
                                                    DMA_TO_DEVICE);
                if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
                        dev_err(jrdev, "unable to map S/G table\n");
                        ret = -ENOMEM;
                        goto unmap_ctx;
                }

                append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

                ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
                if (ret)
                        goto unmap_ctx;

#ifdef DEBUG
                print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
                               DUMP_PREFIX_ADDRESS, 16, 4, desc,
                               desc_bytes(desc), 1);
#endif

                ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
                if (ret)
                        goto unmap_ctx;

                ret = -EINPROGRESS;
                state->update = ahash_update_ctx;
                state->finup = ahash_finup_ctx;
                state->final = ahash_final_ctx;
        } else if (*next_buflen) {
                scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
                                         req->nbytes, 0);
                *buflen = *next_buflen;
                *next_buflen = 0;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
        print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
                       *next_buflen, 1);
#endif

        return ret;
 unmap_ctx:
        ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
        kfree(edesc);
        return ret;
}

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC;
        int buflen = *current_buflen(state);
        u32 *desc;
        int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
        int digestsize = crypto_ahash_digestsize(ahash);
        struct ahash_edesc *edesc;
        int ret;

        src_nents = sg_nents_for_len(req->src, req->nbytes);
        if (src_nents < 0) {
                dev_err(jrdev, "Invalid number of src SG.\n");
                return src_nents;
        }

        if (src_nents) {
                mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
                                          DMA_TO_DEVICE);
                if (!mapped_nents) {
                        dev_err(jrdev, "unable to DMA map source\n");
                        return -ENOMEM;
                }
        } else {
                mapped_nents = 0;
        }

        sec4_sg_src_index = 2;
        sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
                         sizeof(struct sec4_sg_entry);

        /* allocate space for base edesc and hw desc commands, link tables */
        edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
                                  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
                                  flags);
        if (!edesc) {
                dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
                return -ENOMEM;
        }

        desc = edesc->hw_desc;

        edesc->src_nents = src_nents;
        edesc->sec4_sg_bytes = sec4_sg_bytes;

        ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
        if (ret)
                goto unmap;

        ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
                                  req->nbytes);
        if (ret) {
                dev_err(jrdev, "unable to map S/G table\n");
                goto unmap;
        }

        edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
                                                digestsize);
        if (dma_mapping_error(jrdev, edesc->dst_dma)) {
                dev_err(jrdev, "unable to map dst\n");
                goto unmap;
        }

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

        ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
        if (!ret) {
                ret = -EINPROGRESS;
        } else {
                ahash_unmap(jrdev, edesc, req, digestsize);
                kfree(edesc);
        }

        return ret;
 unmap:
        ahash_unmap(jrdev, edesc, req, digestsize);
        kfree(edesc);
        return -ENOMEM;

}

1325 /* submit first update job descriptor after init */
1326 static int ahash_update_first(struct ahash_request *req)
1327 {
1328         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1329         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1330         struct caam_hash_state *state = ahash_request_ctx(req);
1331         struct device *jrdev = ctx->jrdev;
1332         gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1333                        GFP_KERNEL : GFP_ATOMIC;
1334         u8 *next_buf = alt_buf(state);
1335         int *next_buflen = alt_buflen(state);
1336         int to_hash;
1337         u32 *desc;
1338         int src_nents, mapped_nents;
1339         struct ahash_edesc *edesc;
1340         int ret = 0;
1341
1342         *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
1343                                       1);
1344         to_hash = req->nbytes - *next_buflen;
1345
	if (to_hash) {
		src_nents = sg_nents_for_len(req->src, to_hash);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
		switch_buf(state);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

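/*
 * finup on a request that has seen no update yet has no buffered data or
 * running context to fold in, so it reduces to a one-shot digest.
 */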
static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}

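/*
 * The entry points below simply dispatch through per-request state:
 * ahash_init() installs the "first" handlers above, and each path then
 * re-points them (e.g. to the _ctx or _no_ctx variants) as data flows in.
 */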
static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}

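/*
 * export/import serialize the software side of a request: the currently
 * active partial-block buffer, the CAAM running context and the
 * state-machine function pointers, so an operation can be suspended in
 * one request and resumed in another.
 */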
static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	int len;
	u8 *buf;

	if (state->current_buf) {
		buf = state->buf_1;
		len = state->buflen_1;
	} else {
		buf = state->buf_0;
		len = state->buflen_0;
	}

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf_0, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen_0 = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}

struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};

/* ahash algorithm templates */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	},
};

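/*
 * Wrapper around the registered ahash_alg; entry links it into hash_list
 * so caam_algapi_hash_exit() can later unregister and free it.
 */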
struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	struct ahash_alg ahash_alg;
};

static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/*
	 * Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512.
	 * The truncated variants (SHA-224/SHA-384) still carry the full
	 * SHA-256/SHA-512 internal state, hence the same running lengths.
	 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	dma_addr_t dma_addr;
	struct caam_drv_private *priv;

	/*
	 * Get a job ring from the Job Ring driver to ensure in-order
	 * processing of crypto requests on a given tfm.
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	priv = dev_get_drvdata(ctx->jrdev->parent);
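	/*
	 * Note: Era 6+ parts can derive the HMAC split key in-place in the
	 * shared descriptor (DKP), so the device may write back into
	 * descriptor memory; presumably this is why those parts need a
	 * bidirectional mapping while older ones are mapped to-device only.
	 */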
	ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_dma),
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map shared descriptors\n");
		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

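	/*
	 * The four shared descriptors sit back to back at the start of
	 * caam_hash_ctx and were mapped above as a single region (sized by
	 * offsetof() of the first non-descriptor member), so each DMA
	 * handle below is derived from the base address by offsetof().
	 */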
	ctx->sh_desc_update_dma = dma_addr;
	ctx->sh_desc_update_first_dma = dma_addr +
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_first);
	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
						   sh_desc_fin);
	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
						      sh_desc_digest);

	/* copy descriptor header template value */
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;

	ctx->ctx_len = runninglen[(ctx->adata.algtype &
				   OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));
	return ahash_set_sh_desc(ahash);
}

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
			       offsetof(struct caam_hash_ctx,
					sh_desc_update_dma),
			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}

static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

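	/* Bail out if init never got as far as INIT_LIST_HEAD(&hash_list) */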
	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

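/*
 * Instantiate one algorithm from a template entry; e.g. the sha256 entry
 * yields "hmac(sha256)"/"hmac-sha256-caam" when keyed, or
 * "sha256"/"sha256-caam" (with .setkey stripped) when not.
 */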
static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC;

	t_alg->alg_type = template->alg_type;

	return t_alg;
}

static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	int i = 0, err = 0;
	struct caam_drv_private *priv;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 cha_inst, cha_vid;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	/*
	 * Register crypto algorithms the device supports. First, identify
	 * presence and attributes of the MD block.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);

	/*
	 * Skip registration of any hashing algorithms if the MD block
	 * is not present.
	 */
	if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
		return -ENODEV;

	/*
	 * A low-power LP256 MD block only supports digests up to SHA-256,
	 * so the sha384/sha512 templates are skipped below.
	 */
	if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* Register both the hmac and unkeyed variant of each template */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}
	}

	return err;
}

module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");