// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->| ShareDesc   |
 * | *(packet 1) |                     | (hashKey)   |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->| ShareDesc   |
 * | *(packet 2) |      |------------->| (hashKey)   |
 * ---------------      |         |--->| (operation) |
 *       .              |         |    | (load ctx2) |
 *       .              |         |    ---------------
 * ---------------      |         |
 * | JobDesc #3  |------|         |
 * | *(packet 3) |                |
 * ---------------                |
 *       .                        |
 *       .                        |
 * ---------------                |
 * | JobDesc #4  |----------------|
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */

#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamhash_desc.h"

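/*
 * Illustrative sketch, not part of the driver: a caller exercises the
 * scheme described at the top of this file through the generic crypto
 * API; each crypto_ahash_digest() call becomes one job descriptor that
 * points at the per-tfm shared descriptor. The "sha256-caam" name and
 * the buffer handling are assumptions for the example only.
 */
#if 0	/* example only, kept out of the build */
static int caamhash_digest_example(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_ahash("sha256-caam", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	/* data must be DMA-able (e.g. kmalloc'd), not on the stack */
	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	/* enqueues one job descriptor; completion runs ahash_done() */
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return ret;
}
#endif
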
#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	enum dma_data_direction dir;
	struct device *jrdev;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	int ctx_len;
	struct alginfo adata;
};

/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};

struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};

static inline void switch_buf(struct caam_hash_state *state)
{
	state->current_buf ^= 1;
}

static inline u8 *current_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_1 : state->buf_0;
}

static inline u8 *alt_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_0 : state->buf_1;
}

static inline int *current_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_1 : &state->buflen_0;
}

static inline int *alt_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_0 : &state->buflen_1;
}

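/*
 * Note on the helpers above: buf_0/buf_1 form a ping-pong pair. While
 * the current buffer may still be mapped for an in-flight job, partial
 * trailing data for the next request is staged in the alternate buffer;
 * switch_buf() flips the roles once the job completes (see
 * ahash_done_bi() and ahash_done_ctx_dst()).
 */
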
/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}

/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
				     struct sec4_sg_entry *sec4_sg,
				     struct caam_hash_state *state)
{
	int buflen = *current_buflen(state);

	if (!buflen)
		return 0;

	state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map buf\n");
		state->buf_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);

	return 0;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}

static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 *desc;

	ctx->adata.key_virt = ctx->key;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

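/*
 * The four shared descriptors built above map onto the ahash state
 * machine: sh_desc_update_first (OP_ALG_AS_INIT) handles the first
 * update after init, sh_desc_update (OP_ALG_AS_UPDATE) subsequent
 * updates, sh_desc_fin (OP_ALG_AS_FINALIZE) final/finup on a saved
 * context, and sh_desc_digest (OP_ALG_AS_INITFINAL) one-shot digests.
 */
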
/* Digest the key if it is too large (longer than the hash block size) */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}

static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	int ret;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc_array(digestsize,
					   sizeof(*hashed_key),
					   GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.key_inline = true;
		ctx->adata.keylen = keylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
			goto bad_free_key;

		memcpy(ctx->key, key, keylen);
	} else {
		ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
				    keylen, CAAM_MAX_HASH_KEY_SIZE);
		if (ret)
			goto bad_free_key;
	}

	kfree(hashed_key);
	return ahash_set_sh_desc(ahash);
 bad_free_key:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[0];
};

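/*
 * The zero-length sec4_sg[] member is a flexible array tail:
 * ahash_edesc_alloc() below grabs sizeof(struct ahash_edesc) +
 * sg_num * sizeof(struct sec4_sg_entry) in a single GFP_DMA
 * allocation, keeping the job descriptor and its link table
 * contiguous in memory.
 */
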
static inline void ahash_unmap(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}

static inline void ahash_unmap_ctx(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len, u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
		state->ctx_dma = 0;
	}
	ahash_unmap(dev, edesc, req, dst_len);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	switch_buf(state);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	switch_buf(state);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

/*
 * Allocate an enhanced descriptor, which contains the hardware descriptor
 * and space for hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma,
					     gfp_t flags)
{
	struct ahash_edesc *edesc;
	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
	if (!edesc) {
		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
		return NULL;
	}

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}

static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) * (first_sg + nents);

		sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state), last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
					  ctx->sh_desc_update,
					  ctx->sh_desc_update_dma, flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents) {
			sg_to_sec4_sg_last(req->src, mapped_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
			if (*next_buflen)
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
		} else {
			sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
					    1);
		}

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				       to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	state->buf_dma = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int buflen = *current_buflen(state);
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma, flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map src\n");
		goto unmap;
	}

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state);
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_bytes = (1 + mapped_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
		if (ret)
			goto unmap_ctx;

		sg_to_sec4_sg_last(req->src, mapped_nents,
				   edesc->sec4_sg + 1, 0);

		if (*next_buflen) {
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);
		}

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
	if (ret)
		goto unmap;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state);
	int to_hash;
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
		switch_buf(state);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}

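/*
 * state->update/finup/final start out pointing at the "first"/"no ctx"
 * variants; once a job has produced a running digest in state->caam_ctx,
 * ahash_update_first() and ahash_update_no_ctx() repoint them at the
 * *_ctx variants so subsequent requests chain through the saved context.
 */
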
static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}

static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	int len;
	u8 *buf;

	if (state->current_buf) {
		buf = state->buf_1;
		len = state->buflen_1;
	} else {
		buf = state->buf_0;
		len = state->buflen_0;
	}

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf_0, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen_0 = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}

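/*
 * Illustrative sketch, not part of the driver: the export/import pair
 * above lets a caller freeze a partial hash and resume it on another
 * request. .statesize for these algs is sizeof(struct caam_export_state),
 * so crypto_ahash_statesize(tfm) bytes of scratch space suffice; the
 * function and variable names here are assumptions for the example.
 */
#if 0	/* example only, kept out of the build */
static int caamhash_export_resume_example(struct ahash_request *req1,
					  struct ahash_request *req2)
{
	u8 state[sizeof(struct caam_export_state)];
	int ret;

	ret = crypto_ahash_export(req1, state);	/* snapshot after update */
	if (ret)
		return ret;

	return crypto_ahash_import(req2, state);	/* resume elsewhere */
}
#endif
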
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};

/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	},
};

struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	struct ahash_alg ahash_alg;
};

static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	dma_addr_t dma_addr;
	struct caam_drv_private *priv;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	priv = dev_get_drvdata(ctx->jrdev->parent);
	ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_dma),
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map shared descriptors\n");
		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	ctx->sh_desc_update_dma = dma_addr;
	ctx->sh_desc_update_first_dma = dma_addr +
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_first);
	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
						   sh_desc_fin);
	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
						      sh_desc_digest);

	/* copy descriptor header template value */
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;

	ctx->ctx_len = runninglen[(ctx->adata.algtype &
				   OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));
	return ahash_set_sh_desc(ahash);
}

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
			       offsetof(struct caam_hash_ctx,
					sh_desc_update_dma),
			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}

static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC;

	t_alg->alg_type = template->alg_type;

	return t_alg;
}

static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	int i = 0, err = 0;
	struct caam_drv_private *priv;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 cha_inst, cha_vid;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	/*
	 * Register crypto algorithms the device supports. First, identify
	 * presence and attributes of MD block.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
		return -ENODEV;

	/* Limit digest size based on LP256 */
	if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}

module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");