// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |         |--->| (operation) |
 *       .              |         |    | (load ctx2) |
 *       .              |         |    ---------------
 * ---------------      |         |
 * | JobDesc #3  |------|         |
 * | *(packet 3) |                |
 * ---------------                |
 *       .                        |
 *       .                        |
 * ---------------                |
 * | JobDesc #4  |----------------|
 * | *(packet x) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamhash_desc.h"
#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
static struct list_head hash_list;
/* ahash per-session context */
struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	enum dma_data_direction dir;
	struct device *jrdev;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	int ctx_len;
	struct alginfo adata;
};
/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};
struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
static inline void switch_buf(struct caam_hash_state *state)
{
	state->current_buf ^= 1;
}

static inline u8 *current_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_1 : state->buf_0;
}

static inline u8 *alt_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_0 : state->buf_1;
}

static inline int *current_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_1 : &state->buflen_0;
}

static inline int *alt_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_0 : &state->buflen_1;
}
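/*
 * Illustrative sketch only: the intended ping-pong usage of the helpers
 * above, as done by the update paths below. A sub-blocksize tail is staged
 * into the alternate buffer, then switch_buf() makes it the current buffer
 * for the next request.
 */
static inline void __maybe_unused caamhash_example_stash_tail(struct caam_hash_state *state,
							      const u8 *tail, int len)
{
	memcpy(alt_buf(state), tail, len);
	*alt_buflen(state) = len;
	switch_buf(state);
}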
/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}
/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}
/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
				     struct sec4_sg_entry *sec4_sg,
				     struct caam_hash_state *state)
{
	int buflen = *current_buflen(state);

	if (!buflen)
		return 0;

	state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map buf\n");
		state->buf_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);

	return 0;
}
/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}
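/*
 * Illustrative sketch only, mirroring what ahash_update_ctx() does below:
 * a typical link table for a mid-stream update chains the saved context
 * (entry 0) and any previously buffered bytes (entry 1) ahead of the new
 * scatterlist data.
 */
static int __maybe_unused caamhash_example_link_table(struct device *jrdev,
						      struct caam_hash_state *state,
						      struct sec4_sg_entry *sec4_sg,
						      int ctx_len)
{
	int ret;

	/* entry 0: running context, read and written back by the update */
	ret = ctx_map_to_sec4_sg(jrdev, state, ctx_len, sec4_sg,
				 DMA_BIDIRECTIONAL);
	if (ret)
		return ret;

	/* entry 1: leftover partial block from the previous request, if any */
	return buf_map_to_sec4_sg(jrdev, sec4_sg + 1, state);
}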
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 *desc;

	ctx->adata.key_virt = ctx->key;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
/* Digest the key if its length exceeds the algorithm block size */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}
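/*
 * Note: hash_digest_key() above is the "K > blocksize => K = H(K)" step of
 * the HMAC key rules; ahash_setkey() below invokes it before deriving the
 * split key (or inlining the key for DKP-capable hardware).
 */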
static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	int ret;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc_array(digestsize,
					   sizeof(*hashed_key),
					   GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.key_inline = true;
		ctx->adata.keylen = keylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
			goto bad_free_key;

		memcpy(ctx->key, key, keylen);
	} else {
		ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
				    keylen, CAAM_MAX_HASH_KEY_SIZE);
		if (ret)
			goto bad_free_key;
	}

	kfree(hashed_key);
	return ahash_set_sh_desc(ahash);
 bad_free_key:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[0];
};
static inline void ahash_unmap(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}
static inline void ahash_unmap_ctx(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len, u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
		state->ctx_dma = 0;
	}

	ahash_unmap(dev, edesc, req, dst_len);
}
static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	switch_buf(state);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	switch_buf(state);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
/*
 * Allocate an extended descriptor, which contains the hardware descriptor
 * and space for a hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma,
					     gfp_t flags)
{
	struct ahash_edesc *edesc;
	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
	if (!edesc) {
		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
		return NULL;
	}

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}
static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) * (first_sg + nents);

		sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}
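/*
 * The helper above covers both source layouts: a single DMA-mapped segment
 * with nothing prepended is wired directly into SEQ IN PTR, while multiple
 * segments (or a prepended context/buffer entry) go through a freshly
 * mapped sec4 link table with the SGF bit set.
 */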
/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state), last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
					  ctx->sh_desc_update,
					  ctx->sh_desc_update_dma, flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents) {
			sg_to_sec4_sg_last(req->src, mapped_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
			if (*next_buflen)
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
		} else {
			sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
					    1);
		}

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				       to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}
static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}
static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	state->buf_dma = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int buflen = *current_buflen(state);
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma, flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map src\n");
		goto unmap;
	}

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}
/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state);
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_bytes = (1 + mapped_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
		if (ret)
			goto unmap_ctx;

		sg_to_sec4_sg_last(req->src, mapped_nents,
				   edesc->sec4_sg + 1, 0);

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
	if (ret)
		goto unmap;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}
/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state);
	int to_hash;
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
		switch_buf(state);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}
static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}
static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	int len;
	u8 *buf;

	if (state->current_buf) {
		buf = state->buf_1;
		len = state->buflen_1;
	} else {
		buf = state->buf_0;
		len = state->buflen_0;
	}

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}
static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf_0, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen_0 = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}
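/*
 * Illustrative sketch only (a hypothetical caller, not part of this
 * driver): the export()/import() pair above lets generic users snapshot a
 * partial hash via crypto_ahash_export() and resume it on another request
 * with crypto_ahash_import().
 */
static int __maybe_unused caamhash_example_export_import(struct ahash_request *src_req,
							 struct ahash_request *dst_req)
{
	struct caam_export_state export;
	int ret;

	/* snapshot buffered bytes, CAAM context and the state callbacks */
	ret = crypto_ahash_export(src_req, &export);
	if (ret)
		return ret;

	/* resume the partial hash on a second request */
	return crypto_ahash_import(dst_req, &export);
}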
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};
/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	},
};
struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	struct ahash_alg ahash_alg;
};
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	dma_addr_t dma_addr;
	struct caam_drv_private *priv;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	priv = dev_get_drvdata(ctx->jrdev->parent);
	ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_dma),
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map shared descriptors\n");
		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	ctx->sh_desc_update_dma = dma_addr;
	ctx->sh_desc_update_first_dma = dma_addr +
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_first);
	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
						   sh_desc_fin);
	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
						      sh_desc_digest);

	/* copy descriptor header template value */
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;

	ctx->ctx_len = runninglen[(ctx->adata.algtype &
				   OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));
	return ahash_set_sh_desc(ahash);
}
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
			       offsetof(struct caam_hash_ctx,
					sh_desc_update_dma),
			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}
static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC;

	t_alg->alg_type = template->alg_type;

	return t_alg;
}
static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	int i = 0, err = 0;
	struct caam_drv_private *priv;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 md_inst, md_vid;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	/*
	 * Register crypto algorithms the device supports. First, identify
	 * presence and attributes of MD block.
	 */
	if (priv->era < 10) {
		md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
			  CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
		md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			   CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
	} else {
		u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);

		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
		md_inst = mdha & CHA_VER_NUM_MASK;
	}

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!md_inst)
		return -ENODEV;

	/* Limit digest size based on LP256 */
	if (md_vid == CHA_VER_VID_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}
module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");