// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013-2019 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 */
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/des.h>
#include <linux/ccp.h>

#include "ccp-dev.h"
/* SHA initial context values */
static const __be32 ccp_sha1_init[SHA1_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
	cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
	cpu_to_be32(SHA1_H4),
};

static const __be32 ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
	cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
	cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
	cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
};

static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
	cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
	cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
	cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};

static const __be64 ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
	cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1),
	cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3),
	cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5),
	cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7),
};

static const __be64 ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
	cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1),
	cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3),
	cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5),
	cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7),
};
#define	CCP_NEW_JOBID(ccp)	((ccp->vdata->version == CCP_VERSION(3, 0)) ? \
					ccp_gen_jobid(ccp) : 0)
static u32 ccp_gen_jobid(struct ccp_device *ccp)
{
	return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK;
}
static void ccp_sg_free(struct ccp_sg_workarea *wa)
{
	if (wa->dma_count)
		dma_unmap_sg(wa->dma_dev, wa->dma_sg_head, wa->nents, wa->dma_dir);

	wa->dma_count = 0;
}
static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
				struct scatterlist *sg, u64 len,
				enum dma_data_direction dma_dir)
{
	memset(wa, 0, sizeof(*wa));

	wa->sg = sg;
	if (!sg)
		return 0;

	wa->nents = sg_nents_for_len(sg, len);
	if (wa->nents < 0)
		return wa->nents;

	wa->bytes_left = len;
	wa->sg_used = 0;

	if (len == 0)
		return 0;

	if (dma_dir == DMA_NONE)
		return 0;

	wa->dma_sg = sg;
	wa->dma_sg_head = sg;
	wa->dma_dev = dev;
	wa->dma_dir = dma_dir;
	wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
	if (!wa->dma_count)
		return -ENOMEM;

	return 0;
}
static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
{
	unsigned int nbytes = min_t(u64, len, wa->bytes_left);
	unsigned int sg_combined_len = 0;

	if (!wa->sg)
		return;

	wa->sg_used += nbytes;
	wa->bytes_left -= nbytes;
	if (wa->sg_used == sg_dma_len(wa->dma_sg)) {
		/* Advance to the next DMA scatterlist entry */
		wa->dma_sg = sg_next(wa->dma_sg);

		/* In the case that the DMA mapped scatterlist has entries
		 * that have been merged, the non-DMA mapped scatterlist
		 * must be advanced multiple times for each merged entry.
		 * This ensures that the current non-DMA mapped entry
		 * corresponds to the current DMA mapped entry.
		 */
		do {
			sg_combined_len += wa->sg->length;
			wa->sg = sg_next(wa->sg);
		} while (wa->sg_used > sg_combined_len);

		wa->sg_used = 0;
	}
}
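
/* Worked example of the merged-entry advance above (illustrative only):
 * if the caller's scatterlist has three 4KB entries and the IOMMU
 * coalesced the first two into a single 8KB DMA segment, then once
 * sg_used reaches 8192 the do/while walks the non-DMA list twice
 * (sg_combined_len = 4096, then 8192) so that wa->sg lands on the third
 * entry, back in step with the next DMA segment.
 */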
static void ccp_dm_free(struct ccp_dm_workarea *wa)
{
	if (wa->length <= CCP_DMAPOOL_MAX_SIZE) {
		if (wa->dma.address)
			dma_pool_free(wa->dma_pool, wa->address,
				      wa->dma.address);
	} else {
		if (wa->dma.address)
			dma_unmap_single(wa->dev, wa->dma.address, wa->length,
					 wa->dma.dir);
		kfree(wa->address);
	}

	wa->length = 0;
	wa->address = NULL;
	wa->dma.address = 0;
}
static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
				struct ccp_cmd_queue *cmd_q,
				unsigned int len,
				enum dma_data_direction dir)
{
	memset(wa, 0, sizeof(*wa));

	if (!len)
		return 0;

	wa->dev = cmd_q->ccp->dev;
	wa->length = len;

	if (len <= CCP_DMAPOOL_MAX_SIZE) {
		wa->dma_pool = cmd_q->dma_pool;

		wa->address = dma_pool_zalloc(wa->dma_pool, GFP_KERNEL,
					      &wa->dma.address);
		if (!wa->address)
			return -ENOMEM;

		wa->dma.length = CCP_DMAPOOL_MAX_SIZE;
	} else {
		wa->address = kzalloc(len, GFP_KERNEL);
		if (!wa->address)
			return -ENOMEM;

		wa->dma.address = dma_map_single(wa->dev, wa->address, len,
						 dir);
		if (dma_mapping_error(wa->dev, wa->dma.address))
			return -ENOMEM;

		wa->dma.length = len;
	}
	wa->dma.dir = dir;

	return 0;
}
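
/* Minimal lifecycle sketch for a DM workarea (illustrative, not driver
 * code; 'cmd_q' and 'sg' are assumed to be prepared by the caller):
 *
 *	struct ccp_dm_workarea wa;
 *	int ret;
 *
 *	ret = ccp_init_dm_workarea(&wa, cmd_q, 32, DMA_TO_DEVICE);
 *	if (!ret)
 *		ret = ccp_set_dm_area(&wa, 0, sg, 0, 32);
 *	... point a ccp_op at wa.dma.address and run it ...
 *	ccp_dm_free(&wa);
 */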
static int ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			   struct scatterlist *sg, unsigned int sg_offset,
			   unsigned int len)
{
	WARN_ON(!wa->address);

	if (len > (wa->length - wa_offset))
		return -EINVAL;

	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 0);
	return 0;
}
static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			    struct scatterlist *sg, unsigned int sg_offset,
			    unsigned int len)
{
	WARN_ON(!wa->address);

	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 1);
}
static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
				   unsigned int wa_offset,
				   struct scatterlist *sg,
				   unsigned int sg_offset,
				   unsigned int len)
{
	u8 *p, *q;
	int rc;

	rc = ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);
	if (rc)
		return rc;

	p = wa->address + wa_offset;
	q = p + len - 1;
	while (p < q) {
		*p = *p ^ *q;
		*q = *p ^ *q;
		*p = *p ^ *q;
		p++;
		q--;
	}
	return 0;
}
static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa,
				    unsigned int wa_offset,
				    struct scatterlist *sg,
				    unsigned int sg_offset,
				    unsigned int len)
{
	u8 *p, *q;

	p = wa->address + wa_offset;
	q = p + len - 1;
	while (p < q) {
		*p = *p ^ *q;
		*q = *p ^ *q;
		*p = *p ^ *q;
		p++;
		q--;
	}

	ccp_get_dm_area(wa, wa_offset, sg, sg_offset, len);
}
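
/* The reverse helpers convert between the caller's big-endian buffers
 * and the little-endian order the CCP expects. Illustrative effect of
 * the in-place XOR-swap loop on a 4-byte area:
 *
 *	before: 12 34 56 78
 *	after:  78 56 34 12
 */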
static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q)
{
	ccp_dm_free(&data->dm_wa);
	ccp_sg_free(&data->sg_wa);
}
static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q,
			 struct scatterlist *sg, u64 sg_len,
			 unsigned int dm_len,
			 enum dma_data_direction dir)
{
	int ret;

	memset(data, 0, sizeof(*data));

	ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len,
				   dir);
	if (ret)
		goto e_err;

	ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir);
	if (ret)
		goto e_err;

	return 0;

e_err:
	ccp_free_data(data, cmd_q);

	return ret;
}
static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
{
	struct ccp_sg_workarea *sg_wa = &data->sg_wa;
	struct ccp_dm_workarea *dm_wa = &data->dm_wa;
	unsigned int buf_count, nbytes;

	/* Clear the buffer if setting it */
	if (!from)
		memset(dm_wa->address, 0, dm_wa->length);

	if (!sg_wa->sg)
		return 0;

	/* Perform the copy operation
	 * nbytes will always be <= UINT_MAX because dm_wa->length is
	 * an unsigned int
	 */
	nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length);
	scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used,
				 nbytes, from);

	/* Update the structures and generate the count */
	buf_count = 0;
	while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
		nbytes = min(sg_dma_len(sg_wa->dma_sg) - sg_wa->sg_used,
			     dm_wa->length - buf_count);
		nbytes = min_t(u64, sg_wa->bytes_left, nbytes);

		buf_count += nbytes;
		ccp_update_sg_workarea(sg_wa, nbytes);
	}

	return buf_count;
}
static unsigned int ccp_fill_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 0);
}

static unsigned int ccp_empty_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 1);
}
static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op, unsigned int block_size,
			     bool blocksize_op)
{
	unsigned int sg_src_len, sg_dst_len, op_len;

	/* The CCP can only DMA from/to one address each per operation. This
	 * requires that we find the smallest DMA area between the source
	 * and destination. The resulting len values will always be <= UINT_MAX
	 * because the dma length is an unsigned int.
	 */
	sg_src_len = sg_dma_len(src->sg_wa.dma_sg) - src->sg_wa.sg_used;
	sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);

	if (dst) {
		sg_dst_len = sg_dma_len(dst->sg_wa.dma_sg) - dst->sg_wa.sg_used;
		sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
		op_len = min(sg_src_len, sg_dst_len);
	} else {
		op_len = sg_src_len;
	}

	/* The data operation length will be at least block_size in length
	 * or the smaller of available sg room remaining for the source or
	 * destination
	 */
	op_len = max(op_len, block_size);

	/* Unless we have to buffer data, there's no reason to wait */
	op->soc = 0;

	if (sg_src_len < block_size) {
		/* Not enough data in the sg element, so it
		 * needs to be buffered into a blocksize chunk
		 */
		int cp_len = ccp_fill_queue_buf(src);

		op->soc = 1;
		op->src.u.dma.address = src->dm_wa.dma.address;
		op->src.u.dma.offset = 0;
		op->src.u.dma.length = (blocksize_op) ? block_size : cp_len;
	} else {
		/* Enough data in the sg element, but we need to
		 * adjust for any previously copied data
		 */
		op->src.u.dma.address = sg_dma_address(src->sg_wa.dma_sg);
		op->src.u.dma.offset = src->sg_wa.sg_used;
		op->src.u.dma.length = op_len & ~(block_size - 1);

		ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length);
	}

	if (dst) {
		if (sg_dst_len < block_size) {
			/* Not enough room in the sg element or we're on the
			 * last piece of data (when using padding), so the
			 * output needs to be buffered into a blocksize chunk
			 */
			op->soc = 1;
			op->dst.u.dma.address = dst->dm_wa.dma.address;
			op->dst.u.dma.offset = 0;
			op->dst.u.dma.length = op->src.u.dma.length;
		} else {
			/* Enough room in the sg element, but we need to
			 * adjust for any previously used area
			 */
			op->dst.u.dma.address = sg_dma_address(dst->sg_wa.dma_sg);
			op->dst.u.dma.offset = dst->sg_wa.sg_used;
			op->dst.u.dma.length = op->src.u.dma.length;
		}
	}
}
static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op)
{
	op->init = 0;

	if (dst) {
		if (op->dst.u.dma.address == dst->dm_wa.dma.address)
			ccp_empty_queue_buf(dst);
		else
			ccp_update_sg_workarea(&dst->sg_wa,
					       op->dst.u.dma.length);
	}
}
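
/* Worked example for the prepare/process pair (illustrative, assuming a
 * 16-byte block size): with 20 bytes left in the current source DMA
 * segment, ccp_prepare_data() issues a 16-byte operation (op_len rounded
 * down by op_len & ~(block_size - 1)); the 4 remaining bytes are handled
 * on a later pass, buffered through the DM workarea once the segment can
 * no longer supply a full block.
 */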
static int ccp_copy_to_from_sb(struct ccp_cmd_queue *cmd_q,
			       struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			       u32 byte_swap, bool from)
{
	struct ccp_op op;

	memset(&op, 0, sizeof(op));

	op.cmd_q = cmd_q;
	op.jobid = jobid;
	op.eom = 1;

	if (from) {
		op.soc = 1;
		op.src.type = CCP_MEMTYPE_SB;
		op.src.u.sb = sb;
		op.dst.type = CCP_MEMTYPE_SYSTEM;
		op.dst.u.dma.address = wa->dma.address;
		op.dst.u.dma.length = wa->length;
	} else {
		op.src.type = CCP_MEMTYPE_SYSTEM;
		op.src.u.dma.address = wa->dma.address;
		op.src.u.dma.length = wa->length;
		op.dst.type = CCP_MEMTYPE_SB;
		op.dst.u.sb = sb;
	}

	op.u.passthru.byte_swap = byte_swap;

	return cmd_q->ccp->vdata->perform->passthru(&op);
}
static int ccp_copy_to_sb(struct ccp_cmd_queue *cmd_q,
			  struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			  u32 byte_swap)
{
	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, false);
}

static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q,
			    struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			    u32 byte_swap)
{
	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true);
}
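
/* Typical call pattern for the SB copy helpers (a sketch; 'key' is a DM
 * workarea already filled via ccp_set_dm_area()):
 *
 *	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
 *			     CCP_PASSTHRU_BYTESWAP_256BIT);
 *
 * The 256-bit byte swap converts big-endian caller data into the
 * little-endian layout of the storage block; BYTESWAP_NOOP is used when
 * the data is already little endian (e.g. the XTS tweak below).
 */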
static noinline_for_stack int
ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src;
	struct ccp_op op;
	unsigned int dm_offset;
	int ret;

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (aes->src_len & (AES_BLOCK_SIZE - 1))
		return -EINVAL;

	if (aes->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!aes->key || !aes->iv || !aes->src)
		return -EINVAL;

	if (aes->cmac_final) {
		if (aes->cmac_key_len != AES_BLOCK_SIZE)
			return -EINVAL;

		if (!aes->cmac_key)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) SB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	if (ret)
		goto e_key;
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	if (ret)
		goto e_ctx;
	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Send data to the CCP AES engine */
	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE, DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, NULL, &op, AES_BLOCK_SIZE, true);
		if (aes->cmac_final && !src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Push the K1/K2 key to the CCP now */
			ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid,
					       op.sb_ctx,
					       CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}

			ret = ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
					      aes->cmac_key_len);
			if (ret)
				goto e_src;
			ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
					     CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}
		}

		ret = cmd_q->ccp->vdata->perform->aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_src;
		}

		ccp_process_data(&src, NULL, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_src;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
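
/* Minimal sketch of driving this path through ccp_run_cmd()
 * (illustrative only; key_sg/iv_sg/data_sg are caller-prepared
 * scatterlists and 'len' is a multiple of AES_BLOCK_SIZE; a final
 * chunk would additionally set cmac_final and cmac_key):
 *
 *	struct ccp_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.engine = CCP_ENGINE_AES;
 *	cmd.u.aes.type = CCP_AES_TYPE_128;
 *	cmd.u.aes.mode = CCP_AES_MODE_CMAC;
 *	cmd.u.aes.action = CCP_AES_ACTION_ENCRYPT;
 *	cmd.u.aes.key = key_sg;
 *	cmd.u.aes.key_len = AES_KEYSIZE_128;
 *	cmd.u.aes.iv = iv_sg;
 *	cmd.u.aes.iv_len = AES_BLOCK_SIZE;
 *	cmd.u.aes.src = data_sg;
 *	cmd.u.aes.src_len = len;
 *	ret = ccp_run_cmd(cmd_q, &cmd);
 */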
static noinline_for_stack int
ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx, final_wa, tag;
	struct ccp_data src, dst;
	struct ccp_data aad;
	struct ccp_op op;
	unsigned int dm_offset;
	unsigned int authsize;
	unsigned int jobid;
	unsigned int ilen;
	bool in_place = true; /* Default value */
	__be64 *final;
	int ret;

	struct scatterlist *p_inp, sg_inp[2];
	struct scatterlist *p_tag, sg_tag[2];
	struct scatterlist *p_outp, sg_outp[2];
	struct scatterlist *p_aad;

	if (!aes->iv)
		return -EINVAL;

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (!aes->key) /* Gotta have a key SGL */
		return -EINVAL;

	/* Zero defaults to 16 bytes, the maximum size */
	authsize = aes->authsize ? aes->authsize : AES_BLOCK_SIZE;
	switch (authsize) {
	case 16:
	case 15:
	case 14:
	case 13:
	case 12:
	case 8:
	case 4:
		break;
	default:
		return -EINVAL;
	}

	/* First, decompose the source buffer into AAD & PT,
	 * and the destination buffer into AAD, CT & tag, or
	 * the input into CT & tag.
	 * It is expected that the input and output SGs will
	 * be valid, even if the AAD and input lengths are 0.
	 */
	p_aad = aes->src;
	p_inp = scatterwalk_ffwd(sg_inp, aes->src, aes->aad_len);
	p_outp = scatterwalk_ffwd(sg_outp, aes->dst, aes->aad_len);
	if (aes->action == CCP_AES_ACTION_ENCRYPT) {
		ilen = aes->src_len;
		p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen);
	} else {
		/* Input length for decryption includes tag */
		ilen = aes->src_len - authsize;
		p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
	}

	jobid = CCP_NEW_JOBID(cmd_q->ccp);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = jobid;
	op.sb_key = cmd_q->sb_key; /* Pre-allocated */
	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
	op.init = 1;
	op.u.aes.type = aes->type;

	/* Copy the key to the LSB */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	if (ret)
		goto e_key;
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* Copy the context (IV) to the LSB.
	 * There is an assumption here that the IV is 96 bits in length, plus
	 * a nonce of 32 bits. If no IV is present, use a zeroed buffer.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	if (ret)
		goto e_ctx;

	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	op.init = 1;
	if (aes->aad_len > 0) {
		/* Step 1: Run a GHASH over the Additional Authenticated Data */
		ret = ccp_init_data(&aad, cmd_q, p_aad, aes->aad_len,
				    AES_BLOCK_SIZE,
				    DMA_TO_DEVICE);
		if (ret)
			goto e_ctx;

		op.u.aes.mode = CCP_AES_MODE_GHASH;
		op.u.aes.action = CCP_AES_GHASHAAD;

		while (aad.sg_wa.bytes_left) {
			ccp_prepare_data(&aad, NULL, &op, AES_BLOCK_SIZE, true);

			ret = cmd_q->ccp->vdata->perform->aes(&op);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_aad;
			}

			ccp_process_data(&aad, NULL, &op);
		}
	}

	op.u.aes.mode = CCP_AES_MODE_GCTR;
	op.u.aes.action = aes->action;

	if (ilen > 0) {
		/* Step 2: Run a GCTR over the plaintext */
		in_place = (sg_virt(p_inp) == sg_virt(p_outp)) ? true : false;

		ret = ccp_init_data(&src, cmd_q, p_inp, ilen,
				    AES_BLOCK_SIZE,
				    in_place ? DMA_BIDIRECTIONAL
					     : DMA_TO_DEVICE);
		if (ret)
			goto e_aad;

		if (in_place) {
			dst = src;
		} else {
			ret = ccp_init_data(&dst, cmd_q, p_outp, ilen,
					    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
			if (ret)
				goto e_src;
		}

		op.soc = 0;
		op.eom = 0;
		op.init = 1;
		while (src.sg_wa.bytes_left) {
			ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
			if (!src.sg_wa.bytes_left) {
				unsigned int nbytes = ilen % AES_BLOCK_SIZE;

				if (nbytes) {
					op.eom = 1;
					op.u.aes.size = (nbytes * 8) - 1;
				}
			}

			ret = cmd_q->ccp->vdata->perform->aes(&op);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_dst;
			}

			ccp_process_data(&src, &dst, &op);
		}
	}

	/* Step 3: Update the IV portion of the context with the original IV */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	if (ret)
		goto e_dst;

	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	/* Step 4: Concatenate the lengths of the AAD and source, and
	 * hash that 16 byte buffer.
	 */
	ret = ccp_init_dm_workarea(&final_wa, cmd_q, AES_BLOCK_SIZE,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_dst;
	final = (__be64 *)final_wa.address;
	final[0] = cpu_to_be64(aes->aad_len * 8);
	final[1] = cpu_to_be64(ilen * 8);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = jobid;
	op.sb_key = cmd_q->sb_key; /* Pre-allocated */
	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
	op.init = 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = CCP_AES_MODE_GHASH;
	op.u.aes.action = CCP_AES_GHASHFINAL;
	op.src.type = CCP_MEMTYPE_SYSTEM;
	op.src.u.dma.address = final_wa.dma.address;
	op.src.u.dma.length = AES_BLOCK_SIZE;
	op.dst.type = CCP_MEMTYPE_SYSTEM;
	op.dst.u.dma.address = final_wa.dma.address;
	op.dst.u.dma.length = AES_BLOCK_SIZE;
	op.eom = 1;
	op.u.aes.size = 0;
	ret = cmd_q->ccp->vdata->perform->aes(&op);
	if (ret)
		goto e_final_wa;

	if (aes->action == CCP_AES_ACTION_ENCRYPT) {
		/* Put the ciphered tag after the ciphertext. */
		ccp_get_dm_area(&final_wa, 0, p_tag, 0, authsize);
	} else {
		/* Does this ciphered tag match the input? */
		ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
					   DMA_BIDIRECTIONAL);
		if (ret)
			goto e_final_wa;
		ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
		if (ret) {
			ccp_dm_free(&tag);
			goto e_final_wa;
		}

		ret = crypto_memneq(tag.address, final_wa.address,
				    authsize) ? -EBADMSG : 0;
		ccp_dm_free(&tag);
	}

e_final_wa:
	ccp_dm_free(&final_wa);

e_dst:
	if (ilen > 0 && !in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	if (ilen > 0)
		ccp_free_data(&src, cmd_q);

e_aad:
	if (aes->aad_len)
		ccp_free_data(&aad, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
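
/* Worked example of the Step 4 length block (illustrative): for 20
 * bytes of AAD and 40 bytes of plaintext, final[] holds the two
 * big-endian 64-bit bit counts { 160, 320 }; GHASHFINAL folds that
 * 16-byte block into the authentication tag.
 */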
static noinline_for_stack int
ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int dm_offset;
	bool in_place = false;
	int ret;

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (((aes->mode == CCP_AES_MODE_ECB) ||
	     (aes->mode == CCP_AES_MODE_CBC)) &&
	    (aes->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (!aes->key || !aes->src || !aes->dst)
		return -EINVAL;

	if (aes->mode != CCP_AES_MODE_ECB) {
		if (aes->iv_len != AES_BLOCK_SIZE)
			return -EINVAL;

		if (!aes->iv)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) SB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	if (ret)
		goto e_key;
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Load the AES context - convert to LE */
		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
		ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
		if (ret)
			goto e_ctx;
		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				     CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_ctx;
		}
	}
	switch (aes->mode) {
	case CCP_AES_MODE_CFB: /* CFB128 only */
	case CCP_AES_MODE_CTR:
		op.u.aes.size = AES_BLOCK_SIZE * BITS_PER_BYTE - 1;
		break;
	default:
		op.u.aes.size = 0;
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(aes->src) == sg_virt(aes->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len,
				    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
		if (!src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Since we don't retrieve the AES context in ECB
			 * mode we have to wait for the operation to complete
			 * on the last piece of data
			 */
			if (aes->mode == CCP_AES_MODE_ECB)
				op.soc = 1;
		}

		ret = cmd_q->ccp->vdata->perform->aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Retrieve the AES context - convert from LE to BE using
		 * 32-byte (256-bit) byteswapping
		 */
		ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				       CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		/* ...but we only need AES_BLOCK_SIZE bytes */
		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
		ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
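
/* Illustrative CBC encryption through this path (a sketch; the
 * scatterlists and 'len' are caller-prepared, with len block-aligned):
 *
 *	struct ccp_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.engine = CCP_ENGINE_AES;
 *	cmd.u.aes.type = CCP_AES_TYPE_256;
 *	cmd.u.aes.mode = CCP_AES_MODE_CBC;
 *	cmd.u.aes.action = CCP_AES_ACTION_ENCRYPT;
 *	cmd.u.aes.key = key_sg;
 *	cmd.u.aes.key_len = AES_KEYSIZE_256;
 *	cmd.u.aes.iv = iv_sg;
 *	cmd.u.aes.iv_len = AES_BLOCK_SIZE;
 *	cmd.u.aes.src = in_sg;
 *	cmd.u.aes.dst = out_sg;
 *	cmd.u.aes.src_len = len;
 *	ret = ccp_run_cmd(cmd_q, &cmd);
 *
 * On return the updated context is copied back into iv_sg so the next
 * request can chain from it.
 */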
static noinline_for_stack int
ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_xts_aes_engine *xts = &cmd->u.xts;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int unit_size, dm_offset;
	bool in_place = false;
	unsigned int sb_count;
	enum ccp_aes_type aestype;
	int ret;

	switch (xts->unit_size) {
	case CCP_XTS_AES_UNIT_SIZE_16:
		unit_size = 16;
		break;
	case CCP_XTS_AES_UNIT_SIZE_512:
		unit_size = 512;
		break;
	case CCP_XTS_AES_UNIT_SIZE_1024:
		unit_size = 1024;
		break;
	case CCP_XTS_AES_UNIT_SIZE_2048:
		unit_size = 2048;
		break;
	case CCP_XTS_AES_UNIT_SIZE_4096:
		unit_size = 4096;
		break;

	default:
		return -EINVAL;
	}

	if (xts->key_len == AES_KEYSIZE_128)
		aestype = CCP_AES_TYPE_128;
	else if (xts->key_len == AES_KEYSIZE_256)
		aestype = CCP_AES_TYPE_256;
	else
		return -EINVAL;

	if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (xts->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!xts->key || !xts->iv || !xts->src || !xts->dst)
		return -EINVAL;

	BUILD_BUG_ON(CCP_XTS_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_XTS_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = 1;
	op.u.xts.type = aestype;
	op.u.xts.action = xts->action;
	op.u.xts.unit_size = xts->unit_size;

	/* A version 3 device only supports 128-bit keys, which fits into a
	 * single SB entry. A version 5 device uses a 512-bit vector, so two
	 * SB entries.
	 */
	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
		sb_count = CCP_XTS_AES_KEY_SB_COUNT;
	else
		sb_count = CCP5_XTS_AES_KEY_SB_COUNT;
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   sb_count * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {
		/* All supported key sizes must be in little endian format.
		 * Use the 256-bit byte swap passthru option to convert from
		 * big endian to little endian.
		 */
		dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
		ret = ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
		if (ret)
			goto e_key;
		ret = ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
		if (ret)
			goto e_key;
	} else {
		/* Version 5 CCPs use a 512-bit space for the key: each portion
		 * occupies 256 bits, or one entire slot, and is zero-padded.
		 */
		unsigned int pad;

		dm_offset = CCP_SB_BYTES;
		pad = dm_offset - xts->key_len;
		ret = ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
		if (ret)
			goto e_key;
		ret = ccp_set_dm_area(&key, dm_offset + pad, xts->key,
				      xts->key_len, xts->key_len);
		if (ret)
			goto e_key;
	}
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * for XTS is already in little endian format so no byte swapping
	 * is needed.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_XTS_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	ret = ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
	if (ret)
		goto e_ctx;
	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_NOOP);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(xts->src) == sg_virt(xts->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len,
			    unit_size,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len,
				    unit_size, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, unit_size, true);
		if (!src.sg_wa.bytes_left)
			op.eom = 1;

		ret = cmd_q->ccp->vdata->perform->xts_aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len);

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
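
/* Key layout example for the two device generations (illustrative,
 * 128-bit XTS key halves K1 and K2):
 *
 *	v3, one 32-byte slot:   [ K2 (16 bytes) | K1 (16 bytes) ]
 *	v5, two 32-byte slots:  [ pad | K1 ] [ pad | K2 ]
 *
 * On v5 each half sits at the end of its own 256-bit slot, zero-padded
 * at the front, matching the ccp_set_dm_area() offsets above.
 */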
static noinline_for_stack int
ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_des3_engine *des3 = &cmd->u.des3;

	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int dm_offset;
	unsigned int len_singlekey;
	bool in_place = false;
	int ret;

	/* Error checks */
	if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0))
		return -EINVAL;

	if (!cmd_q->ccp->vdata->perform->des3)
		return -EINVAL;

	if (des3->key_len != DES3_EDE_KEY_SIZE)
		return -EINVAL;

	if (((des3->mode == CCP_DES3_MODE_ECB) ||
	     (des3->mode == CCP_DES3_MODE_CBC)) &&
	    (des3->src_len & (DES3_EDE_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (!des3->key || !des3->src || !des3->dst)
		return -EINVAL;

	if (des3->mode != CCP_DES3_MODE_ECB) {
		if (des3->iv_len != DES3_EDE_BLOCK_SIZE)
			return -EINVAL;

		if (!des3->iv)
			return -EINVAL;
	}

	/* Zero out all the fields of the command desc */
	memset(&op, 0, sizeof(op));

	/* Set up the Function field */
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;

	op.init = (des3->mode == CCP_DES3_MODE_ECB) ? 0 : 1;
	op.u.des3.type = des3->type;
	op.u.des3.mode = des3->mode;
	op.u.des3.action = des3->action;

	/*
	 * All supported key sizes fit in a single (32-byte) KSB entry and
	 * (like AES) must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_DES3_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/*
	 * The contents of the key triplet are in the reverse order of what
	 * is required by the engine. Copy the 3 pieces individually to put
	 * them where they belong.
	 */
	dm_offset = CCP_SB_BYTES - des3->key_len; /* Basic offset */

	len_singlekey = des3->key_len / 3;
	ret = ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey,
			      des3->key, 0, len_singlekey);
	if (ret)
		goto e_key;
	ret = ccp_set_dm_area(&key, dm_offset + len_singlekey,
			      des3->key, len_singlekey, len_singlekey);
	if (ret)
		goto e_key;
	ret = ccp_set_dm_area(&key, dm_offset,
			      des3->key, 2 * len_singlekey, len_singlekey);
	if (ret)
		goto e_key;

	/* Copy the key to the SB */
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/*
	 * The DES3 context fits in a single (32-byte) KSB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	if (des3->mode != CCP_DES3_MODE_ECB) {
		op.sb_ctx = cmd_q->sb_ctx;

		ret = ccp_init_dm_workarea(&ctx, cmd_q,
					   CCP_DES3_CTX_SB_COUNT * CCP_SB_BYTES,
					   DMA_BIDIRECTIONAL);
		if (ret)
			goto e_key;

		/* Load the context into the LSB */
		dm_offset = CCP_SB_BYTES - des3->iv_len;
		ret = ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0,
				      des3->iv_len);
		if (ret)
			goto e_ctx;

		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				     CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_ctx;
		}
	}

	/*
	 * Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(des3->src) == sg_virt(des3->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, des3->src, des3->src_len,
			    DES3_EDE_BLOCK_SIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, des3->dst, des3->src_len,
				    DES3_EDE_BLOCK_SIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP DES3 engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, DES3_EDE_BLOCK_SIZE, true);
		if (!src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Since we don't retrieve the context in ECB mode
			 * we have to wait for the operation to complete
			 * on the last piece of data
			 */
			op.soc = 0;
		}

		ret = cmd_q->ccp->vdata->perform->des3(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	if (des3->mode != CCP_DES3_MODE_ECB) {
		/* Retrieve the context and make BE */
		ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				       CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		/* ...but we only need the last DES3_EDE_BLOCK_SIZE bytes */
		ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0,
				DES3_EDE_BLOCK_SIZE);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	if (des3->mode != CCP_DES3_MODE_ECB)
		ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
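
/* Worked example of the key reordering above (illustrative): with
 * des3->key = K1 || K2 || K3 (8 bytes each) and a 32-byte slot,
 * dm_offset is 8 and the three copies land as
 *
 *	[ 8 bytes pad | K3 | K2 | K1 ]
 *
 * i.e. the triplet is reversed into the order the engine expects.
 */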
static noinline_for_stack int
ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_sha_engine *sha = &cmd->u.sha;
	struct ccp_dm_workarea ctx;
	struct ccp_data src;
	struct ccp_op op;
	unsigned int ioffset, ooffset;
	unsigned int digest_size;
	int sb_count;
	const void *init;
	u64 block_size;
	int ctx_size;
	int ret;

	switch (sha->type) {
	case CCP_SHA_TYPE_1:
		if (sha->ctx_len < SHA1_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA1_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_224:
		if (sha->ctx_len < SHA224_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA224_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_256:
		if (sha->ctx_len < SHA256_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA256_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_384:
		if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)
		    || sha->ctx_len < SHA384_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA384_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_512:
		if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)
		    || sha->ctx_len < SHA512_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA512_BLOCK_SIZE;
		break;
	default:
		return -EINVAL;
	}

	if (!sha->ctx)
		return -EINVAL;

	if (!sha->final && (sha->src_len & (block_size - 1)))
		return -EINVAL;

	/* The version 3 device can't handle zero-length input */
	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {

		if (!sha->src_len) {
			unsigned int digest_len;
			const u8 *sha_zero;

			/* Not final, just return */
			if (!sha->final)
				return 0;

			/* CCP can't do a zero length sha operation so the
			 * caller must buffer the data.
			 */
			if (sha->msg_bits)
				return -EINVAL;

			/* The CCP cannot perform zero-length sha operations
			 * so the caller is required to buffer data for the
			 * final operation. However, a sha operation for a
			 * message with a total length of zero is valid so
			 * known values are required to supply the result.
			 */
			switch (sha->type) {
			case CCP_SHA_TYPE_1:
				sha_zero = sha1_zero_message_hash;
				digest_len = SHA1_DIGEST_SIZE;
				break;
			case CCP_SHA_TYPE_224:
				sha_zero = sha224_zero_message_hash;
				digest_len = SHA224_DIGEST_SIZE;
				break;
			case CCP_SHA_TYPE_256:
				sha_zero = sha256_zero_message_hash;
				digest_len = SHA256_DIGEST_SIZE;
				break;
			default:
				return -EINVAL;
			}

			scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0,
						 digest_len, 1);

			return 0;
		}
	}

	/* Set variables used throughout */
	switch (sha->type) {
	case CCP_SHA_TYPE_1:
		digest_size = SHA1_DIGEST_SIZE;
		init = (void *) ccp_sha1_init;
		ctx_size = SHA1_DIGEST_SIZE;
		sb_count = 1;
		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
			ooffset = ioffset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
		else
			ooffset = ioffset = 0;
		break;
	case CCP_SHA_TYPE_224:
		digest_size = SHA224_DIGEST_SIZE;
		init = (void *) ccp_sha224_init;
		ctx_size = SHA256_DIGEST_SIZE;
		sb_count = 1;
		ioffset = 0;
		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
			ooffset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
		else
			ooffset = 0;
		break;
	case CCP_SHA_TYPE_256:
		digest_size = SHA256_DIGEST_SIZE;
		init = (void *) ccp_sha256_init;
		ctx_size = SHA256_DIGEST_SIZE;
		sb_count = 1;
		ooffset = ioffset = 0;
		break;
	case CCP_SHA_TYPE_384:
		digest_size = SHA384_DIGEST_SIZE;
		init = (void *) ccp_sha384_init;
		ctx_size = SHA512_DIGEST_SIZE;
		sb_count = 2;
		ioffset = 0;
		ooffset = 2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE;
		break;
	case CCP_SHA_TYPE_512:
		digest_size = SHA512_DIGEST_SIZE;
		init = (void *) ccp_sha512_init;
		ctx_size = SHA512_DIGEST_SIZE;
		sb_count = 2;
		ooffset = ioffset = 0;
		break;
	default:
		ret = -EINVAL;
		goto e_data;
	}

	/* For zero-length plaintext the src pointer is ignored;
	 * otherwise both parts must be valid
	 */
	if (sha->src_len && !sha->src)
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
	op.u.sha.type = sha->type;
	op.u.sha.msg_bits = sha->msg_bits;

	/* For SHA1/224/256 the context fits in a single (32-byte) SB entry;
	 * SHA384/512 require 2 adjacent SB slots, with the right half in the
	 * first slot, and the left half in the second. Each portion must then
	 * be in little endian format: use the 256-bit byte swap option.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		return ret;
	if (sha->first) {
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			memcpy(ctx.address + ioffset, init, ctx_size);
			break;
		case CCP_SHA_TYPE_384:
		case CCP_SHA_TYPE_512:
			memcpy(ctx.address + ctx_size / 2, init,
			       ctx_size / 2);
			memcpy(ctx.address, init + ctx_size / 2,
			       ctx_size / 2);
			break;
		default:
			ret = -EINVAL;
			goto e_ctx;
		}
	} else {
		/* Restore the context */
		ret = ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
				      sb_count * CCP_SB_BYTES);
		if (ret)
			goto e_ctx;
	}

	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	if (sha->src) {
		/* Send data to the CCP SHA engine; block_size is set above */
		ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len,
				    block_size, DMA_TO_DEVICE);
		if (ret)
			goto e_ctx;

		while (src.sg_wa.bytes_left) {
			ccp_prepare_data(&src, NULL, &op, block_size, false);
			if (sha->final && !src.sg_wa.bytes_left)
				op.eom = 1;

			ret = cmd_q->ccp->vdata->perform->sha(&op);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_data;
			}

			ccp_process_data(&src, NULL, &op);
		}
	} else {
		op.eom = 1;
		ret = cmd_q->ccp->vdata->perform->sha(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_data;
		}
	}

	/* Retrieve the SHA context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping to BE
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_data;
	}

	if (sha->final) {
		/* Finishing up, so get the digest */
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			ccp_get_dm_area(&ctx, ooffset,
					sha->ctx, 0,
					digest_size);
			break;
		case CCP_SHA_TYPE_384:
		case CCP_SHA_TYPE_512:
			ccp_get_dm_area(&ctx, 0,
					sha->ctx, LSB_ITEM_SIZE - ooffset,
					LSB_ITEM_SIZE);
			ccp_get_dm_area(&ctx, LSB_ITEM_SIZE + ooffset,
					sha->ctx, 0,
					LSB_ITEM_SIZE - ooffset);
			break;
		default:
			ret = -EINVAL;
			goto e_data;
		}
	} else {
		/* Stash the context */
		ccp_get_dm_area(&ctx, 0, sha->ctx, 0,
				sb_count * CCP_SB_BYTES);
	}

	if (sha->final && sha->opad) {
		/* HMAC operation, recursively perform final SHA */
		struct ccp_cmd hmac_cmd;
		struct scatterlist sg;
		u8 *hmac_buf;

		if (sha->opad_len != block_size) {
			ret = -EINVAL;
			goto e_data;
		}

		hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL);
		if (!hmac_buf) {
			ret = -ENOMEM;
			goto e_data;
		}
		sg_init_one(&sg, hmac_buf, block_size + digest_size);

		scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0);
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			memcpy(hmac_buf + block_size,
			       ctx.address + ooffset,
			       digest_size);
			break;
		case CCP_SHA_TYPE_384:
		case CCP_SHA_TYPE_512:
			memcpy(hmac_buf + block_size,
			       ctx.address + LSB_ITEM_SIZE + ooffset,
			       LSB_ITEM_SIZE);
			memcpy(hmac_buf + block_size +
			       (LSB_ITEM_SIZE - ooffset),
			       ctx.address,
			       LSB_ITEM_SIZE);
			break;
		default:
			kfree(hmac_buf);
			ret = -EINVAL;
			goto e_data;
		}

		memset(&hmac_cmd, 0, sizeof(hmac_cmd));
		hmac_cmd.engine = CCP_ENGINE_SHA;
		hmac_cmd.u.sha.type = sha->type;
		hmac_cmd.u.sha.ctx = sha->ctx;
		hmac_cmd.u.sha.ctx_len = sha->ctx_len;
		hmac_cmd.u.sha.src = &sg;
		hmac_cmd.u.sha.src_len = block_size + digest_size;
		hmac_cmd.u.sha.opad = NULL;
		hmac_cmd.u.sha.opad_len = 0;
		hmac_cmd.u.sha.first = 1;
		hmac_cmd.u.sha.final = 1;
		hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3;

		ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd);
		if (ret)
			cmd->engine_error = hmac_cmd.engine_error;

		kfree(hmac_buf);
	}

e_data:
	if (sha->src)
		ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

	return ret;
}
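
/* Layout of the recursive HMAC buffer built above (illustrative, for
 * SHA-256: block_size = 64, digest_size = 32):
 *
 *	hmac_buf[0..63]  = opad
 *	hmac_buf[64..95] = inner digest (from ctx.address + ooffset)
 *
 * with msg_bits = (64 + 32) * 8, so the recursive call computes
 * H(opad || inner_digest) in a single final pass.
 */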
static noinline_for_stack int
ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_rsa_engine *rsa = &cmd->u.rsa;
	struct ccp_dm_workarea exp, src, dst;
	struct ccp_op op;
	unsigned int sb_count, i_len, o_len;
	int ret;

	/* Check against the maximum allowable size, in bits */
	if (rsa->key_size > cmd_q->ccp->vdata->rsamax)
		return -EINVAL;

	if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* The RSA modulus must precede the message being acted upon, so
	 * it must be copied to a DMA area where the message and the
	 * modulus can be concatenated. Therefore the input buffer
	 * length required is twice the output buffer length (which
	 * must be a multiple of 256-bits). Compute o_len, i_len in bytes.
	 * Buffer sizes must be a multiple of 32 bytes; rounding up may be
	 * required.
	 */
	o_len = 32 * ((rsa->key_size + 255) / 256);
	i_len = o_len * 2;

	sb_count = 0;
	if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) {
		/* sb_count is the number of storage block slots required
		 * for the modulus.
		 */
		sb_count = o_len / CCP_SB_BYTES;
		op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q,
								sb_count);
		if (!op.sb_key)
			return -EIO;
	} else {
		/* A version 5 device allows a modulus size that will not fit
		 * in the LSB, so the command will transfer it from memory.
		 * Set the sb key to the default, even though it's not used.
		 */
		op.sb_key = cmd_q->sb_key;
	}

	/* The RSA exponent must be in little endian format. Reverse its
	 * byte order.
	 */
	ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
	if (ret)
		goto e_sb;

	ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0, rsa->exp_len);
	if (ret)
		goto e_exp;

	if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) {
		/* Copy the exponent to the local storage block, using
		 * as many 32-byte blocks as were allocated above. It's
		 * already little endian, so no further change is required.
		 */
		ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_exp;
		}
	} else {
		/* The exponent can be retrieved from memory via DMA. */
		op.exp.u.dma.address = exp.dma.address;
		op.exp.u.dma.offset = 0;
	}

	/* Concatenate the modulus and the message. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, i_len, DMA_TO_DEVICE);
	if (ret)
		goto e_exp;

	ret = ccp_reverse_set_dm_area(&src, 0, rsa->mod, 0, rsa->mod_len);
	if (ret)
		goto e_src;
	ret = ccp_reverse_set_dm_area(&src, o_len, rsa->src, 0, rsa->src_len);
	if (ret)
		goto e_src;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, o_len, DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = i_len;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = o_len;

	op.u.rsa.mod_size = rsa->key_size;
	op.u.rsa.input_len = i_len;

	ret = cmd_q->ccp->vdata->perform->rsa(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ccp_reverse_get_dm_area(&dst, 0, rsa->dst, 0, rsa->mod_len);

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

e_exp:
	ccp_dm_free(&exp);

e_sb:
	if (sb_count)
		cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);

	return ret;
}
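
/* Worked example of the length math above (illustrative): for a
 * 2048-bit key, o_len = 32 * ((2048 + 255) / 256) = 256 bytes and
 * i_len = 512 bytes; the reversed modulus occupies src[0..255] and the
 * reversed message occupies src[256..511].
 */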
static noinline_for_stack int
ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_passthru_engine *pt = &cmd->u.passthru;
	struct ccp_dm_workarea mask;
	struct ccp_data src, dst;
	struct ccp_op op;
	bool in_place = false;
	unsigned int i;
	int ret = 0;

	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!pt->src || !pt->dst)
		return -EINVAL;

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
			return -EINVAL;
		if (!pt->mask)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		/* Load the mask */
		op.sb_key = cmd_q->sb_key;

		ret = ccp_init_dm_workarea(&mask, cmd_q,
					   CCP_PASSTHRU_SB_COUNT *
					   CCP_SB_BYTES,
					   DMA_TO_DEVICE);
		if (ret)
			return ret;

		ret = ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
		if (ret)
			goto e_mask;
		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_mask;
		}
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(pt->src) == sg_virt(pt->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len,
			    CCP_PASSTHRU_MASKSIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_mask;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len,
				    CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP Passthru engine
	 *   Because the CCP engine works on a single source and destination
	 *   dma address at a time, each entry in the source scatterlist
	 *   (after the dma_map_sg call) must be less than or equal to the
	 *   (remaining) length in the destination scatterlist entry and the
	 *   length must be a multiple of CCP_PASSTHRU_BLOCKSIZE
	 */
	dst.sg_wa.sg_used = 0;
	for (i = 1; i <= src.sg_wa.dma_count; i++) {
		if (!dst.sg_wa.sg ||
		    (sg_dma_len(dst.sg_wa.sg) < sg_dma_len(src.sg_wa.sg))) {
			ret = -EINVAL;
			goto e_dst;
		}

		if (i == src.sg_wa.dma_count) {
			op.eom = 1;
			op.soc = 1;
		}

		op.src.type = CCP_MEMTYPE_SYSTEM;
		op.src.u.dma.address = sg_dma_address(src.sg_wa.sg);
		op.src.u.dma.offset = 0;
		op.src.u.dma.length = sg_dma_len(src.sg_wa.sg);

		op.dst.type = CCP_MEMTYPE_SYSTEM;
		op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg);
		op.dst.u.dma.offset = dst.sg_wa.sg_used;
		op.dst.u.dma.length = op.src.u.dma.length;

		ret = cmd_q->ccp->vdata->perform->passthru(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		dst.sg_wa.sg_used += sg_dma_len(src.sg_wa.sg);
		if (dst.sg_wa.sg_used == sg_dma_len(dst.sg_wa.sg)) {
			dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
			dst.sg_wa.sg_used = 0;
		}
		src.sg_wa.sg = sg_next(src.sg_wa.sg);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_mask:
	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
		ccp_dm_free(&mask);

	return ret;
}
static noinline_for_stack int
ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
			   struct ccp_cmd *cmd)
{
	struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap;
	struct ccp_dm_workarea mask;
	struct ccp_op op;
	int ret;

	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!pt->src_dma || !pt->dst_dma)
		return -EINVAL;

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
			return -EINVAL;
		if (!pt->mask)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		/* Load the mask */
		op.sb_key = cmd_q->sb_key;

		mask.length = pt->mask_len;
		mask.dma.address = pt->mask;
		mask.dma.length = pt->mask_len;

		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			return ret;
		}
	}

	/* Send data to the CCP Passthru engine */
	op.eom = 1;
	op.soc = 1;

	op.src.type = CCP_MEMTYPE_SYSTEM;
	op.src.u.dma.address = pt->src_dma;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = pt->src_len;

	op.dst.type = CCP_MEMTYPE_SYSTEM;
	op.dst.u.dma.address = pt->dst_dma;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = pt->src_len;

	ret = cmd_q->ccp->vdata->perform->passthru(&op);
	if (ret)
		cmd->engine_error = cmd_q->cmd_error;

	return ret;
}
static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.mm.operand_1 ||
	    (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT)
		if (!ecc->u.mm.operand_2 ||
		    (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

	if (!ecc->u.mm.result ||
	    (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first operand */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_1, 0,
				      ecc->u.mm.operand_1_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
		/* Copy the second operand */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_2, 0,
					      ecc->u.mm.operand_2_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the ECC result */
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.mm.result, 0,
				CCP_ECC_MODULUS_BYTES);

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}
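
/* Layout of the source buffer built above (illustrative, one
 * CCP_ECC_OPERAND_SIZE stride per field):
 *
 *	[ modulus | operand_1 | operand_2 (unless MINV) ]
 *
 * Each field is byte-reversed into little endian and padded to the
 * fixed operand stride before the engine consumes the buffer.
 */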
static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.pm.point_1.x ||
	    (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.point_1.y ||
	    (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		if (!ecc->u.pm.point_2.x ||
		    (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) ||
		    !ecc->u.pm.point_2.y ||
		    (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;
	} else {
		if (!ecc->u.pm.domain_a ||
		    (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT)
			if (!ecc->u.pm.scalar ||
			    (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES))
				return -EINVAL;
	}

	if (!ecc->u.pm.result.x ||
	    (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.result.y ||
	    (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first point X and Y coordinate */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.x, 0,
				      ecc->u.pm.point_1.x_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.y, 0,
				      ecc->u.pm.point_1.y_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Set the first point Z coordinate to 1 */
	*src.address = 0x01;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		/* Copy the second point X and Y coordinate */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.x, 0,
					      ecc->u.pm.point_2.x_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.y, 0,
					      ecc->u.pm.point_2.y_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		/* Set the second point Z coordinate to 1 */
		*src.address = 0x01;
		src.address += CCP_ECC_OPERAND_SIZE;
	} else {
		/* Copy the Domain "a" parameter */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.domain_a, 0,
					      ecc->u.pm.domain_a_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
			/* Copy the scalar value */
			ret = ccp_reverse_set_dm_area(&src, 0,
						      ecc->u.pm.scalar, 0,
						      ecc->u.pm.scalar_len);
			if (ret)
				goto e_src;
			src.address += CCP_ECC_OPERAND_SIZE;
		}
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the workarea address since it is updated as we walk through
	 * to copy the point math result
	 */
	save = dst.address;

	/* Save the ECC result X and Y coordinates */
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.x, 0,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.y, 0,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;

	/* Restore the workarea address */
	dst.address = save;

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}
static noinline_for_stack int
ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;

	ecc->ecc_result = 0;

	if (!ecc->mod ||
	    (ecc->mod_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	switch (ecc->function) {
	case CCP_ECC_FUNCTION_MMUL_384BIT:
	case CCP_ECC_FUNCTION_MADD_384BIT:
	case CCP_ECC_FUNCTION_MINV_384BIT:
		return ccp_run_ecc_mm_cmd(cmd_q, cmd);

	case CCP_ECC_FUNCTION_PADD_384BIT:
	case CCP_ECC_FUNCTION_PMUL_384BIT:
	case CCP_ECC_FUNCTION_PDBL_384BIT:
		return ccp_run_ecc_pm_cmd(cmd_q, cmd);

	default:
		return -EINVAL;
	}
}
int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	int ret;

	cmd->engine_error = 0;
	cmd_q->cmd_error = 0;
	cmd_q->int_rcvd = 0;
	cmd_q->free_slots = cmd_q->ccp->vdata->perform->get_free_slots(cmd_q);

	switch (cmd->engine) {
	case CCP_ENGINE_AES:
		switch (cmd->u.aes.mode) {
		case CCP_AES_MODE_CMAC:
			ret = ccp_run_aes_cmac_cmd(cmd_q, cmd);
			break;
		case CCP_AES_MODE_GCM:
			ret = ccp_run_aes_gcm_cmd(cmd_q, cmd);
			break;
		default:
			ret = ccp_run_aes_cmd(cmd_q, cmd);
			break;
		}
		break;
	case CCP_ENGINE_XTS_AES_128:
		ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_DES3:
		ret = ccp_run_des3_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_SHA:
		ret = ccp_run_sha_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_RSA:
		ret = ccp_run_rsa_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_PASSTHRU:
		if (cmd->flags & CCP_CMD_PASSTHRU_NO_DMA_MAP)
			ret = ccp_run_passthru_nomap_cmd(cmd_q, cmd);
		else
			ret = ccp_run_passthru_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_ECC:
		ret = ccp_run_ecc_cmd(cmd_q, cmd);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}