/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |         |--->| (operation) |
 *       .              |         |    | (load ctx2) |
 *       .              |         |    ---------------
 * ---------------      |         |
 * | JobDesc #3  |------|         |
 * | *(packet 3) |                |
 * ---------------                |
 *       .                        |
 *       .                        |
 * ---------------                |
 * | JobDesc #4  |----------------|
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

/* length of descriptors text */
#define DESC_AHASH_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
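/*
 * Worked example of the sizing above, assuming CAAM_CMD_SZ is
 * sizeof(u32) = 4 bytes (one descriptor word): DESC_AHASH_FINAL_LEN is
 * (4 + 5) * 4 = 36 bytes, and CAAM_MAX_HASH_KEY_SIZE is 2 * 64 = 128
 * bytes of split key material, so DESC_HASH_MAX_USED_BYTES is
 * 36 + 128 = 164 bytes, i.e. DESC_HASH_MAX_USED_LEN = 41 words for
 * each sh_desc_* buffer declared below.
 */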
/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
/* for print_hex_dumps with line references */
#ifdef DEBUG
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
static struct list_head hash_list;
/* ahash per-session context */
struct caam_hash_ctx {
	struct device *jrdev;
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
	dma_addr_t sh_desc_update_dma;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	dma_addr_t sh_desc_finup_dma;
	u32 alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	dma_addr_t key_dma;
	int ctx_len;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
};
/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN];
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};
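/*
 * Note on the two buffers above: buf_0/buf_1 form a ping-pong pair.
 * One buffer holds the partial-block tail left over from the previous
 * update (and may still be DMA mapped); the other collects the tail of
 * the current request. current_buf selects which is which and is
 * toggled when a job is submitted, so the driver never copies into a
 * buffer the hardware might still be reading.
 */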
/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}
/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}
/* Map current buffer in state and put it in link table */
static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
					    struct sec4_sg_entry *sec4_sg,
					    u8 *buf, int buflen)
{
	dma_addr_t buf_dma;

	buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);

	return buf_dma;
}
/* Map req->src and put it in link table */
static inline void src_map_to_sec4_sg(struct device *jrdev,
				      struct scatterlist *src, int src_nents,
				      struct sec4_sg_entry *sec4_sg,
				      bool chained)
{
	dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);
	sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
}
/*
 * Only put buffer in link table if it contains data, which is possible
 * when a buffer has previously been used and still needs to be unmapped.
 */
static inline dma_addr_t
try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
		       u8 *buf, dma_addr_t buf_dma, int buflen,
		       int last_buflen)
{
	if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
		dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
	if (buflen)
		buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
	else
		buf_dma = 0;

	return buf_dma;
}
/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}
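/*
 * The sec4_sg_entry link tables built by the helpers above are the
 * hardware's scatter/gather format: each entry carries a DMA address
 * and a length, and the final entry of a chain is flagged with
 * SEC4_SG_LEN_FIN so the DMA engine knows where the input ends.
 */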
/* Common shared descriptor commands */
static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
			  ctx->split_key_len, CLASS_2 |
			  KEY_DEST_MDHA_SPLIT | KEY_ENC);
}
/* Append key if it has been set */
static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	if (ctx->split_key_len) {
		/* Skip if already shared */
		key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					   JUMP_COND_SHRD);

		append_key_ahash(desc, ctx);

		set_jump_tgt_here(desc, key_jump_cmd);
	}

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}
/*
 * For ahash, read data from seqin following state->caam_ctx,
 * and write resulting class2 context to seqout, which may be
 * state->caam_ctx or req->result
 */
static inline void ahash_append_load_str(u32 *desc, int digestsize)
{
	/* Calculate remaining bytes to read */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);

	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}
/*
 * For ahash update, final and finup: import context, read and write to seqout
 */
static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
					 int digestsize,
					 struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}
/* For ahash firsts and digest, read and write to seqout */
static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
				     int digestsize, struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 have_key = 0;
	u32 *desc;

	if (ctx->split_key_len)
		have_key = OP_ALG_AAI_HMAC_PRECOMP;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
			 OP_ALG_ENCRYPT);

	/* Load data and write to result or context */
	ahash_append_load_str(desc, ctx->ctx_len);

	ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
			  ctx->ctx_len, ctx);

	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
						       desc_bytes(desc),
						       DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_finup shared descriptor */
	desc = ctx->sh_desc_finup;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
			  digestsize, ctx);

	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
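/*
 * Summary of the five shared descriptors built above and the request
 * paths that use them:
 *   sh_desc_update       - ahash_update_ctx()   (ctx in, ctx out)
 *   sh_desc_update_first - ahash_update_first() / ahash_update_no_ctx()
 *                          (no ctx in, ctx out)
 *   sh_desc_fin          - ahash_final_ctx()    (ctx in, digest out)
 *   sh_desc_finup        - ahash_finup_ctx()    (ctx in, digest out)
 *   sh_desc_digest       - ahash_digest() and the no-ctx final/finup
 *                          paths (no ctx in, digest out)
 */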
static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			      u32 keylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, keylen,
			     ctx->alg_op);
}
/* Digest the input key if it is longer than the block size */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret = 0;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion_interruptible(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}
static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ret = 0;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
				     GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto badkey;
		key = hashed_key;
	}

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

#ifdef DEBUG
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_hash_key(ctx, key, keylen);
	if (ret)
		goto badkey;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		ret = -ENOMEM;
		goto map_err;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len, 1);
#endif

	ret = ahash_set_sh_desc(ahash);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
				 DMA_TO_DEVICE);
	}

map_err:
	kfree(hashed_key);
	return ret;
badkey:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
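/*
 * Keys longer than the block size are pre-hashed above before the
 * split key is generated, mirroring the standard HMAC rule (RFC 2104)
 * that K = H(K) when len(K) > blocksize. In CAAM's scheme the split
 * key is the pair of precomputed inner/outer pad contexts the MDHA
 * derives from the raw key, which is why split_key_len is twice the
 * mdpadlen[] entry for the selected algorithm.
 */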
/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @chained: if source is chained
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	bool chained;
	int src_nents;
	int sec4_sg_bytes;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};
static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	if (edesc->src_nents)
		dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
				     DMA_TO_DEVICE, edesc->chained);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}
static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len,
				   u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma)
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
	ahash_unmap(dev, edesc, req, dst_len);
}
static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
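/*
 * The pointer arithmetic above works because hw_desc[] is the last
 * member of struct ahash_edesc: the descriptor address handed back by
 * the job ring is the address of hw_desc, so subtracting
 * offsetof(struct ahash_edesc, hw_desc) recovers the enclosing edesc.
 * The remaining three completion callbacks use the same recovery.
 */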
static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1, last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *sh_desc = ctx->sh_desc_update, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_dma;
	int src_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
				       &chained);
		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;

		ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			return ret;

		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
							edesc->sec4_sg + 1,
							buf, state->buf_dma,
							*buflen, last_buflen);

		if (src_nents) {
			src_map_to_sec4_sg(jrdev, req->src, src_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   chained);
			if (*next_buflen)
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
		} else {
			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
							SEC4_SG_LEN_FIN;
		}

		state->current_buf = !state->current_buf;

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				       to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (!ret) {
			ret = -EINPROGRESS;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_BIDIRECTIONAL);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}
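/*
 * A worked example of the buffering arithmetic above, assuming
 * sha256-caam (64-byte block size): with 10 bytes left in buf from the
 * previous update and req->nbytes = 100, in_len = 110, so
 * *next_buflen = 110 & 63 = 46 and to_hash = 64. One full block is
 * hashed (the 10 buffered bytes plus 54 bytes from req->src), and the
 * remaining 46 bytes are copied into next_buf for a later
 * update/final.
 */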
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_fin, *desc;
	dma_addr_t ptr = ctx->sh_desc_fin_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->src_nents = 0;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		return ret;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);
	/* mark the last link-table entry (index, not byte offset) */
	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}
static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_finup, *desc;
	dma_addr_t ptr = ctx->sh_desc_finup_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	src_nents = __sg_count(req->src, req->nbytes, &chained);
	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->chained = chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		return ret;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
			   sec4_sg_src_index, chained);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
			       buflen + req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}
static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, sec4_sg_bytes;
	dma_addr_t src_dma;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	u32 options;
	int sh_len;

	src_nents = sg_count(req->src, req->nbytes, &chained);
	dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,
			   chained);
	sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
			DESC_JOB_IO_LEN, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->src_nents = src_nents;
	edesc->chained = chained;

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (src_nents) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}
		src_dma = edesc->sec4_sg_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, options);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
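/*
 * Note: when sg_count() collapses the source to a single segment
 * (src_nents == 0), the code above skips the link table entirely and
 * points SEQ IN directly at the segment, saving one DMA mapping.
 */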
/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	edesc->sec4_sg_bytes = 0;
	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map src\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}
	edesc->src_nents = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1;
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents;
	struct ahash_edesc *edesc;
	u32 *desc, *sh_desc = ctx->sh_desc_update_first;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	bool chained = false;
	int ret = 0;
	int sh_len;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
				       &chained);
		sec4_sg_bytes = (1 + src_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->dst_dma = 0;

		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
						    buf, *buflen);
		src_map_to_sec4_sg(jrdev, req->src, src_nents,
				   edesc->sec4_sg + 1, chained);
		if (*next_buflen) {
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);
			state->current_buf = !state->current_buf;
		}

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			return ret;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}
/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	bool chained = false;
	int sh_len;
	int ret = 0;

	src_nents = __sg_count(req->src, req->nbytes, &chained);
	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->chained = chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
						state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
			   chained);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
			  req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *next_buflen = state->current_buf ?
		&state->buflen_1 : &state->buflen_0;
	int to_hash;
	u32 *sh_desc = ctx->sh_desc_update_first, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	int sec4_sg_bytes, src_nents;
	dma_addr_t src_dma;
	u32 options;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_count(req->src, req->nbytes - (*next_buflen),
				     &chained);
		dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
				   DMA_TO_DEVICE, chained);
		sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->dst_dma = 0;

		if (src_nents) {
			sg_to_sec4_sg_last(req->src, src_nents,
					   edesc->sec4_sg, 0);
			edesc->sec4_sg_dma = dma_map_single(jrdev,
							    edesc->sec4_sg,
							    sec4_sg_bytes,
							    DMA_TO_DEVICE);
			if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
				dev_err(jrdev, "unable to map S/G table\n");
				return -ENOMEM;
			}
			src_dma = edesc->sec4_sg_dma;
			options = LDST_SGF;
		} else {
			src_dma = sg_dma_address(req->src);
			options = 0;
		}

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, src_dma, to_hash, options);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			return ret;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
				      req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}
static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}
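/*
 * The function pointers in caam_hash_state implement a small state
 * machine: ahash_init() routes the first request to the *_first
 * variants; once a job has produced a running context in caam_ctx,
 * the pointers are switched to the *_ctx variants, and the no-ctx
 * variants cover the case where only buffered data exists so far.
 * A typical multi-part hash therefore flows:
 *   ahash_init -> ahash_update_first -> ahash_update_ctx ... ->
 *   ahash_final_ctx
 */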
static int ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(struct caam_hash_ctx));
	memcpy(out + sizeof(struct caam_hash_ctx), state,
	       sizeof(struct caam_hash_state));
	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(struct caam_hash_ctx));
	memcpy(state, in + sizeof(struct caam_hash_ctx),
	       sizeof(struct caam_hash_state));
	return 0;
}
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
	u32 alg_op;
};
/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
};
struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	int alg_op;
	struct ahash_alg ahash_alg;
};
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	int ret = 0;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}
	/* copy descriptor header template value */
	ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;

	ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));

	ret = ahash_set_sh_desc(ahash);

	return ret;
}
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_update_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
				 desc_bytes(ctx->sh_desc_update),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_update_first_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
				 desc_bytes(ctx->sh_desc_update_first),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_fin_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
				 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
	if (ctx->sh_desc_digest_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
				 desc_bytes(ctx->sh_desc_digest),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_finup_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
				 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}
static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
	alg->cra_type = &crypto_ahash_type;

	t_alg->alg_type = template->alg_type;
	t_alg->alg_op = template->alg_op;

	return t_alg;
}
static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	void *priv;
	int i = 0, err = 0;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_hash_alg *t_alg;

		/* register hmac version */
		t_alg = caam_hash_alloc(&driver_hash[i], true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				driver_hash[i].driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->ahash_alg.halg.base.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		/* register unkeyed version */
		t_alg = caam_hash_alloc(&driver_hash[i], false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				driver_hash[i].driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->ahash_alg.halg.base.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}
module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");
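
/*
 * Illustrative sketch (not part of the original driver): a minimal
 * kernel consumer of one of the algorithms registered above, using
 * only the generic crypto ahash API. The algorithm name "sha256-caam"
 * comes from the driver_hash[] table; everything else is standard API.
 * Error handling is abbreviated, and `data` must be DMA-able memory
 * (not on the stack) since this driver maps it for the hardware.
 */
#if 0	/* example only; not compiled with the driver */
#include <crypto/hash.h>
#include <linux/scatterlist.h>
#include <linux/completion.h>

struct example_result {
	struct completion completion;
	int err;
};

static void example_done(struct crypto_async_request *areq, int err)
{
	struct example_result *res = areq->data;

	if (err == -EINPROGRESS)
		return;
	res->err = err;
	complete(&res->completion);
}

static int example_sha256_caam(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	struct example_result res;
	int ret;

	tfm = crypto_alloc_ahash("sha256-caam", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	init_completion(&res.completion);
	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   example_done, &res);
	ahash_request_set_crypt(req, &sg, out, len);

	/* one-shot digest; completes asynchronously via the job ring */
	ret = crypto_ahash_digest(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&res.completion);
		ret = res.err;
	}

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return ret;
}
#endif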