/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |         |--->| (operation) |
 *       .              |         |    | (load ctx2) |
 *       .              |         |    ---------------
 * ---------------      |         |
 * | JobDesc #3  |------|         |
 * | *(packet 3) |                |
 * ---------------                |
 *       .                        |
 *       .                        |
 * ---------------                |
 * | JobDesc #4  |----------------|
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

/* length of descriptors text */
#define DESC_AHASH_BASE			(3 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

static struct list_head hash_list;
/* ahash per-session context */
struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	struct device *jrdev;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	int ctx_len;
	struct alginfo adata;
};
/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};
struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
static inline void switch_buf(struct caam_hash_state *state)
{
	state->current_buf ^= 1;
}

static inline u8 *current_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_1 : state->buf_0;
}

static inline u8 *alt_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_0 : state->buf_1;
}

static inline int *current_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_1 : &state->buflen_0;
}

static inline int *alt_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_0 : &state->buflen_1;
}

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}
/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}
/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
				     struct sec4_sg_entry *sec4_sg,
				     struct caam_hash_state *state)
{
	int buflen = *current_buflen(state);

	if (!buflen)
		return 0;

	state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map buf\n");
		state->buf_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);

	return 0;
}
/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}
/*
 * For ahash update, final and finup (import_ctx = true)
 *     import context, read and write to seqout
 * For ahash first update and digest (import_ctx = false)
 *     read and write to seqout
 */
static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
				     struct caam_hash_ctx *ctx, bool import_ctx)
{
	u32 op = ctx->adata.algtype;
	u32 *skip_key_load;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Append key if it has been set; ahash update excluded */
	if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
		/* Skip key loading if already shared */
		skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					    JUMP_COND_SHRD);

		append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
				  ctx->adata.keylen, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);

		set_jump_tgt_here(desc, skip_key_load);

		op |= OP_ALG_AAI_HMAC_PRECOMP;
	}

	/* If needed, import context from software */
	if (import_ctx)
		append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
				LDST_SRCDST_BYTE_CONTEXT);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 * Calculate remaining bytes to read
	 */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);
	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}
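
/*
 * ahash_set_sh_desc() below instantiates the template above four times per
 * tfm: OP_ALG_AS_UPDATE (import context, hash, export context),
 * OP_ALG_AS_INIT (first update: no import, export context),
 * OP_ALG_AS_FINALIZE (import context, emit digest) and
 * OP_ALG_AS_INITFINAL (one-shot digest).
 */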
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;
	ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;
	ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;
	ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;
	ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
/* Digest the key if its size exceeds the algorithm block size */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}
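
/*
 * Per the HMAC construction, a key longer than the block size is first
 * digested down to digestsize bytes (hash_digest_key() above); the result is
 * then expanded by gen_split_key() into the MDHA split key (the precomputed
 * inner/outer pad state) that the shared descriptors load with
 * KEY_DEST_MDHA_SPLIT.
 */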
static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ret;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc_array(digestsize,
					   sizeof(*hashed_key),
					   GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
			    CAAM_MAX_HASH_KEY_SIZE);
	if (ret)
		goto bad_free_key;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->adata.keylen_pad, 1);
#endif

	kfree(hashed_key);
	return ahash_set_sh_desc(ahash);
 bad_free_key:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[0];
};
static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}

static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len,
				   u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
		state->ctx_dma = 0;
	}

	ahash_unmap(dev, edesc, req, dst_len);
}
static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	switch_buf(state);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	switch_buf(state);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
/*
 * Allocate an extended descriptor, which contains the hardware descriptor
 * and space for hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma,
					     gfp_t flags)
{
	struct ahash_edesc *edesc;
	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
	if (!edesc) {
		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
		return NULL;
	}

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}

static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) * (first_sg + nents);

		sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}
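
/*
 * Note the fast path above: a single, already-mapped source segment with no
 * prepended buffer is handed to the CAAM directly by address, while any
 * multi-segment input goes through a sec4 S/G table built inline after the
 * job descriptor and referenced with LDST_SGF.
 */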
/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state), last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
					  ctx->sh_desc_update,
					  ctx->sh_desc_update_dma, flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents) {
			sg_to_sec4_sg_last(req->src, mapped_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
			if (*next_buflen)
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
		} else {
			sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
					    1);
		}

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				       to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}
static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}
static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	state->buf_dma = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int buflen = *current_buflen(state);
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma, flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map src\n");
		goto unmap;
	}

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}
/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state);
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_bytes = (1 + mapped_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
		if (ret)
			goto unmap_ctx;

		sg_to_sec4_sg_last(req->src, mapped_nents,
				   edesc->sec4_sg + 1, 0);

		if (*next_buflen) {
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);
		}

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
	if (ret)
		goto unmap;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;

}
/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state);
	int to_hash;
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
		switch_buf(state);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}
static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}
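
/*
 * The update/final/finup entry points above simply dispatch through
 * caam_hash_state: ahash_init() starts in the "no context" variants, and
 * once a first job has produced a running context in state->caam_ctx, the
 * submit paths switch the pointers over to the *_ctx variants.
 */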
static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	int len;
	u8 *buf;

	if (state->current_buf) {
		buf = state->buf_1;
		len = state->buflen_1;
	} else {
		buf = state->buf_0;
		len = state->buflen_0;
	}

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf_0, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen_0 = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}
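
/*
 * export/import serialize only the software-visible part of the state (the
 * pending partial block, the running context bytes and the stage function
 * pointers), which is why .statesize below is
 * sizeof(struct caam_export_state); DMA addresses are deliberately not
 * exported and are re-established on the next submitted job.
 */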
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};
/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	},
};
struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	struct ahash_alg ahash_alg;
};
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	dma_addr_t dma_addr;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_dma),
					DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map shared descriptors\n");
		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	ctx->sh_desc_update_dma = dma_addr;
	ctx->sh_desc_update_first_dma = dma_addr +
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_first);
	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
						   sh_desc_fin);
	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
						      sh_desc_digest);

	/* copy descriptor header template value */
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;

	ctx->ctx_len = runninglen[(ctx->adata.algtype &
				   OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));
	return ahash_set_sh_desc(ahash);
}

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
			       offsetof(struct caam_hash_ctx,
					sh_desc_update_dma),
			       DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}
static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
	alg->cra_type = &crypto_ahash_type;

	t_alg->alg_type = template->alg_type;

	return t_alg;
}
static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	int i = 0, err = 0;
	struct caam_drv_private *priv;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 cha_inst, cha_vid;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	/*
	 * Register crypto algorithms the device supports. First, identify
	 * presence and attributes of MD block.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
		return -ENODEV;

	/* Limit digest size based on LP256 */
	if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}
module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");
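
/*
 * A minimal usage sketch (not part of this driver, kept compiled-out): how a
 * kernel client would drive one of the ahash algorithms registered above
 * through the generic crypto API. The caamhash_example_* names are
 * hypothetical; "sha256" resolves to "sha256-caam" whenever this driver wins
 * priority arbitration (CAAM_CRA_PRIORITY = 3000).
 */
#if 0
struct caamhash_example_result {
	struct completion completion;
	int err;
};

/* async completion callback: record status and wake the waiter */
static void caamhash_example_done(struct crypto_async_request *areq, int err)
{
	struct caamhash_example_result *res = areq->data;

	if (err == -EINPROGRESS)
		return;
	res->err = err;
	complete(&res->completion);
}

static int caamhash_example(void)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	struct caamhash_example_result res;
	static const u8 msg[] = "abc";
	u8 digest[SHA256_DIGEST_SIZE];
	int ret;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	init_completion(&res.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   caamhash_example_done, &res);
	sg_init_one(&sg, msg, sizeof(msg) - 1);
	ahash_request_set_crypt(req, &sg, digest, sizeof(msg) - 1);

	/* one-shot digest; ahash_digest() above services this request */
	ret = crypto_ahash_digest(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&res.completion);
		ret = res.err;
	}

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return ret;
}
#endif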