2 * AMD Cryptographic Coprocessor (CCP) driver
4 * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
6 * Author: Tom Lendacky <thomas.lendacky@amd.com>
7 * Author: Gary R Hook <gary.hook@amd.com>
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
14 #include <linux/module.h>
15 #include <linux/kernel.h>
16 #include <linux/pci.h>
17 #include <linux/interrupt.h>
18 #include <crypto/scatterwalk.h>
19 #include <linux/ccp.h>
23 /* SHA initial context values */
24 static const __be32 ccp_sha1_init[SHA1_DIGEST_SIZE / sizeof(__be32)] = {
25 cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
26 cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
30 static const __be32 ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
31 cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
32 cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
33 cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
34 cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
37 static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
38 cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
39 cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
40 cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
41 cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
44 #define CCP_NEW_JOBID(ccp) ((ccp->vdata->version == CCP_VERSION(3, 0)) ? \
45 ccp_gen_jobid(ccp) : 0)
47 static u32 ccp_gen_jobid(struct ccp_device *ccp)
49 return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK;
52 static void ccp_sg_free(struct ccp_sg_workarea *wa)
55 dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir);
60 static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
61 struct scatterlist *sg, u64 len,
62 enum dma_data_direction dma_dir)
64 memset(wa, 0, sizeof(*wa));
70 wa->nents = sg_nents_for_len(sg, len);
80 if (dma_dir == DMA_NONE)
85 wa->dma_dir = dma_dir;
86 wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
93 static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
95 unsigned int nbytes = min_t(u64, len, wa->bytes_left);
100 wa->sg_used += nbytes;
101 wa->bytes_left -= nbytes;
102 if (wa->sg_used == wa->sg->length) {
103 wa->sg = sg_next(wa->sg);
108 static void ccp_dm_free(struct ccp_dm_workarea *wa)
110 if (wa->length <= CCP_DMAPOOL_MAX_SIZE) {
112 dma_pool_free(wa->dma_pool, wa->address,
116 dma_unmap_single(wa->dev, wa->dma.address, wa->length,
125 static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
126 struct ccp_cmd_queue *cmd_q,
128 enum dma_data_direction dir)
130 memset(wa, 0, sizeof(*wa));
135 wa->dev = cmd_q->ccp->dev;
138 if (len <= CCP_DMAPOOL_MAX_SIZE) {
139 wa->dma_pool = cmd_q->dma_pool;
141 wa->address = dma_pool_alloc(wa->dma_pool, GFP_KERNEL,
146 wa->dma.length = CCP_DMAPOOL_MAX_SIZE;
148 memset(wa->address, 0, CCP_DMAPOOL_MAX_SIZE);
150 wa->address = kzalloc(len, GFP_KERNEL);
154 wa->dma.address = dma_map_single(wa->dev, wa->address, len,
156 if (!wa->dma.address)
159 wa->dma.length = len;
166 static void ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
167 struct scatterlist *sg, unsigned int sg_offset,
170 WARN_ON(!wa->address);
172 scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
176 static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
177 struct scatterlist *sg, unsigned int sg_offset,
180 WARN_ON(!wa->address);
182 scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
186 static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
187 unsigned int wa_offset,
188 struct scatterlist *sg,
189 unsigned int sg_offset,
194 ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);
196 p = wa->address + wa_offset;
208 static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa,
209 unsigned int wa_offset,
210 struct scatterlist *sg,
211 unsigned int sg_offset,
216 p = wa->address + wa_offset;
226 ccp_get_dm_area(wa, wa_offset, sg, sg_offset, len);
229 static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q)
231 ccp_dm_free(&data->dm_wa);
232 ccp_sg_free(&data->sg_wa);
235 static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q,
236 struct scatterlist *sg, u64 sg_len,
238 enum dma_data_direction dir)
242 memset(data, 0, sizeof(*data));
244 ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len,
249 ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir);
256 ccp_free_data(data, cmd_q);
261 static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
263 struct ccp_sg_workarea *sg_wa = &data->sg_wa;
264 struct ccp_dm_workarea *dm_wa = &data->dm_wa;
265 unsigned int buf_count, nbytes;
267 /* Clear the buffer if setting it */
269 memset(dm_wa->address, 0, dm_wa->length);
274 /* Perform the copy operation
275 * nbytes will always be <= UINT_MAX because dm_wa->length is
278 nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length);
279 scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used,
282 /* Update the structures and generate the count */
284 while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
285 nbytes = min(sg_wa->sg->length - sg_wa->sg_used,
286 dm_wa->length - buf_count);
287 nbytes = min_t(u64, sg_wa->bytes_left, nbytes);
290 ccp_update_sg_workarea(sg_wa, nbytes);
296 static unsigned int ccp_fill_queue_buf(struct ccp_data *data)
298 return ccp_queue_buf(data, 0);
301 static unsigned int ccp_empty_queue_buf(struct ccp_data *data)
303 return ccp_queue_buf(data, 1);
306 static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
307 struct ccp_op *op, unsigned int block_size,
310 unsigned int sg_src_len, sg_dst_len, op_len;
312 /* The CCP can only DMA from/to one address each per operation. This
313 * requires that we find the smallest DMA area between the source
314 * and destination. The resulting len values will always be <= UINT_MAX
315 * because the dma length is an unsigned int.
317 sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used;
318 sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);
321 sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used;
322 sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
323 op_len = min(sg_src_len, sg_dst_len);
328 /* The data operation length will be at least block_size in length
329 * or the smaller of available sg room remaining for the source or
332 op_len = max(op_len, block_size);
334 /* Unless we have to buffer data, there's no reason to wait */
337 if (sg_src_len < block_size) {
338 /* Not enough data in the sg element, so it
339 * needs to be buffered into a blocksize chunk
341 int cp_len = ccp_fill_queue_buf(src);
344 op->src.u.dma.address = src->dm_wa.dma.address;
345 op->src.u.dma.offset = 0;
346 op->src.u.dma.length = (blocksize_op) ? block_size : cp_len;
348 /* Enough data in the sg element, but we need to
349 * adjust for any previously copied data
351 op->src.u.dma.address = sg_dma_address(src->sg_wa.sg);
352 op->src.u.dma.offset = src->sg_wa.sg_used;
353 op->src.u.dma.length = op_len & ~(block_size - 1);
355 ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length);
359 if (sg_dst_len < block_size) {
360 /* Not enough room in the sg element or we're on the
361 * last piece of data (when using padding), so the
362 * output needs to be buffered into a blocksize chunk
365 op->dst.u.dma.address = dst->dm_wa.dma.address;
366 op->dst.u.dma.offset = 0;
367 op->dst.u.dma.length = op->src.u.dma.length;
369 /* Enough room in the sg element, but we need to
370 * adjust for any previously used area
372 op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg);
373 op->dst.u.dma.offset = dst->sg_wa.sg_used;
374 op->dst.u.dma.length = op->src.u.dma.length;
379 static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst,
385 if (op->dst.u.dma.address == dst->dm_wa.dma.address)
386 ccp_empty_queue_buf(dst);
388 ccp_update_sg_workarea(&dst->sg_wa,
389 op->dst.u.dma.length);
393 static int ccp_copy_to_from_sb(struct ccp_cmd_queue *cmd_q,
394 struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
395 u32 byte_swap, bool from)
399 memset(&op, 0, sizeof(op));
407 op.src.type = CCP_MEMTYPE_SB;
409 op.dst.type = CCP_MEMTYPE_SYSTEM;
410 op.dst.u.dma.address = wa->dma.address;
411 op.dst.u.dma.length = wa->length;
413 op.src.type = CCP_MEMTYPE_SYSTEM;
414 op.src.u.dma.address = wa->dma.address;
415 op.src.u.dma.length = wa->length;
416 op.dst.type = CCP_MEMTYPE_SB;
420 op.u.passthru.byte_swap = byte_swap;
422 return cmd_q->ccp->vdata->perform->passthru(&op);
425 static int ccp_copy_to_sb(struct ccp_cmd_queue *cmd_q,
426 struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
429 return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, false);
432 static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q,
433 struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
436 return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true);
439 static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
442 struct ccp_aes_engine *aes = &cmd->u.aes;
443 struct ccp_dm_workarea key, ctx;
446 unsigned int dm_offset;
449 if (!((aes->key_len == AES_KEYSIZE_128) ||
450 (aes->key_len == AES_KEYSIZE_192) ||
451 (aes->key_len == AES_KEYSIZE_256)))
454 if (aes->src_len & (AES_BLOCK_SIZE - 1))
457 if (aes->iv_len != AES_BLOCK_SIZE)
460 if (!aes->key || !aes->iv || !aes->src)
463 if (aes->cmac_final) {
464 if (aes->cmac_key_len != AES_BLOCK_SIZE)
471 BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
472 BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);
475 memset(&op, 0, sizeof(op));
477 op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
478 op.sb_key = cmd_q->sb_key;
479 op.sb_ctx = cmd_q->sb_ctx;
481 op.u.aes.type = aes->type;
482 op.u.aes.mode = aes->mode;
483 op.u.aes.action = aes->action;
485 /* All supported key sizes fit in a single (32-byte) SB entry
486 * and must be in little endian format. Use the 256-bit byte
487 * swap passthru option to convert from big endian to little
490 ret = ccp_init_dm_workarea(&key, cmd_q,
491 CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
496 dm_offset = CCP_SB_BYTES - aes->key_len;
497 ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
498 ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
499 CCP_PASSTHRU_BYTESWAP_256BIT);
501 cmd->engine_error = cmd_q->cmd_error;
505 /* The AES context fits in a single (32-byte) SB entry and
506 * must be in little endian format. Use the 256-bit byte swap
507 * passthru option to convert from big endian to little endian.
509 ret = ccp_init_dm_workarea(&ctx, cmd_q,
510 CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
515 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
516 ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
517 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
518 CCP_PASSTHRU_BYTESWAP_256BIT);
520 cmd->engine_error = cmd_q->cmd_error;
524 /* Send data to the CCP AES engine */
525 ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
526 AES_BLOCK_SIZE, DMA_TO_DEVICE);
530 while (src.sg_wa.bytes_left) {
531 ccp_prepare_data(&src, NULL, &op, AES_BLOCK_SIZE, true);
532 if (aes->cmac_final && !src.sg_wa.bytes_left) {
535 /* Push the K1/K2 key to the CCP now */
536 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid,
538 CCP_PASSTHRU_BYTESWAP_256BIT);
540 cmd->engine_error = cmd_q->cmd_error;
544 ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
546 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
547 CCP_PASSTHRU_BYTESWAP_256BIT);
549 cmd->engine_error = cmd_q->cmd_error;
554 ret = cmd_q->ccp->vdata->perform->aes(&op);
556 cmd->engine_error = cmd_q->cmd_error;
560 ccp_process_data(&src, NULL, &op);
563 /* Retrieve the AES context - convert from LE to BE using
564 * 32-byte (256-bit) byteswapping
566 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
567 CCP_PASSTHRU_BYTESWAP_256BIT);
569 cmd->engine_error = cmd_q->cmd_error;
573 /* ...but we only need AES_BLOCK_SIZE bytes */
574 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
575 ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
578 ccp_free_data(&src, cmd_q);
589 static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
591 struct ccp_aes_engine *aes = &cmd->u.aes;
592 struct ccp_dm_workarea key, ctx;
593 struct ccp_data src, dst;
595 unsigned int dm_offset;
596 bool in_place = false;
599 if (aes->mode == CCP_AES_MODE_CMAC)
600 return ccp_run_aes_cmac_cmd(cmd_q, cmd);
602 if (!((aes->key_len == AES_KEYSIZE_128) ||
603 (aes->key_len == AES_KEYSIZE_192) ||
604 (aes->key_len == AES_KEYSIZE_256)))
607 if (((aes->mode == CCP_AES_MODE_ECB) ||
608 (aes->mode == CCP_AES_MODE_CBC) ||
609 (aes->mode == CCP_AES_MODE_CFB)) &&
610 (aes->src_len & (AES_BLOCK_SIZE - 1)))
613 if (!aes->key || !aes->src || !aes->dst)
616 if (aes->mode != CCP_AES_MODE_ECB) {
617 if (aes->iv_len != AES_BLOCK_SIZE)
624 BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
625 BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);
628 memset(&op, 0, sizeof(op));
630 op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
631 op.sb_key = cmd_q->sb_key;
632 op.sb_ctx = cmd_q->sb_ctx;
633 op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1;
634 op.u.aes.type = aes->type;
635 op.u.aes.mode = aes->mode;
636 op.u.aes.action = aes->action;
638 /* All supported key sizes fit in a single (32-byte) SB entry
639 * and must be in little endian format. Use the 256-bit byte
640 * swap passthru option to convert from big endian to little
643 ret = ccp_init_dm_workarea(&key, cmd_q,
644 CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
649 dm_offset = CCP_SB_BYTES - aes->key_len;
650 ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
651 ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
652 CCP_PASSTHRU_BYTESWAP_256BIT);
654 cmd->engine_error = cmd_q->cmd_error;
658 /* The AES context fits in a single (32-byte) SB entry and
659 * must be in little endian format. Use the 256-bit byte swap
660 * passthru option to convert from big endian to little endian.
662 ret = ccp_init_dm_workarea(&ctx, cmd_q,
663 CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
668 if (aes->mode != CCP_AES_MODE_ECB) {
669 /* Load the AES context - convert to LE */
670 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
671 ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
672 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
673 CCP_PASSTHRU_BYTESWAP_256BIT);
675 cmd->engine_error = cmd_q->cmd_error;
680 case CCP_AES_MODE_CFB: /* CFB128 only */
681 case CCP_AES_MODE_CTR:
682 op.u.aes.size = AES_BLOCK_SIZE * BITS_PER_BYTE - 1;
688 /* Prepare the input and output data workareas. For in-place
689 * operations we need to set the dma direction to BIDIRECTIONAL
690 * and copy the src workarea to the dst workarea.
692 if (sg_virt(aes->src) == sg_virt(aes->dst))
695 ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
697 in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
704 ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len,
705 AES_BLOCK_SIZE, DMA_FROM_DEVICE);
710 /* Send data to the CCP AES engine */
711 while (src.sg_wa.bytes_left) {
712 ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
713 if (!src.sg_wa.bytes_left) {
716 /* Since we don't retrieve the AES context in ECB
717 * mode we have to wait for the operation to complete
718 * on the last piece of data
720 if (aes->mode == CCP_AES_MODE_ECB)
724 ret = cmd_q->ccp->vdata->perform->aes(&op);
726 cmd->engine_error = cmd_q->cmd_error;
730 ccp_process_data(&src, &dst, &op);
733 if (aes->mode != CCP_AES_MODE_ECB) {
734 /* Retrieve the AES context - convert from LE to BE using
735 * 32-byte (256-bit) byteswapping
737 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
738 CCP_PASSTHRU_BYTESWAP_256BIT);
740 cmd->engine_error = cmd_q->cmd_error;
744 /* ...but we only need AES_BLOCK_SIZE bytes */
745 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
746 ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
751 ccp_free_data(&dst, cmd_q);
754 ccp_free_data(&src, cmd_q);
765 static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
768 struct ccp_xts_aes_engine *xts = &cmd->u.xts;
769 struct ccp_dm_workarea key, ctx;
770 struct ccp_data src, dst;
772 unsigned int unit_size, dm_offset;
773 bool in_place = false;
776 switch (xts->unit_size) {
777 case CCP_XTS_AES_UNIT_SIZE_16:
780 case CCP_XTS_AES_UNIT_SIZE_512:
783 case CCP_XTS_AES_UNIT_SIZE_1024:
786 case CCP_XTS_AES_UNIT_SIZE_2048:
789 case CCP_XTS_AES_UNIT_SIZE_4096:
797 if (xts->key_len != AES_KEYSIZE_128)
800 if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
803 if (xts->iv_len != AES_BLOCK_SIZE)
806 if (!xts->key || !xts->iv || !xts->src || !xts->dst)
809 BUILD_BUG_ON(CCP_XTS_AES_KEY_SB_COUNT != 1);
810 BUILD_BUG_ON(CCP_XTS_AES_CTX_SB_COUNT != 1);
813 memset(&op, 0, sizeof(op));
815 op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
816 op.sb_key = cmd_q->sb_key;
817 op.sb_ctx = cmd_q->sb_ctx;
819 op.u.xts.action = xts->action;
820 op.u.xts.unit_size = xts->unit_size;
822 /* All supported key sizes fit in a single (32-byte) SB entry
823 * and must be in little endian format. Use the 256-bit byte
824 * swap passthru option to convert from big endian to little
827 ret = ccp_init_dm_workarea(&key, cmd_q,
828 CCP_XTS_AES_KEY_SB_COUNT * CCP_SB_BYTES,
833 dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
834 ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
835 ccp_set_dm_area(&key, 0, xts->key, dm_offset, xts->key_len);
836 ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
837 CCP_PASSTHRU_BYTESWAP_256BIT);
839 cmd->engine_error = cmd_q->cmd_error;
843 /* The AES context fits in a single (32-byte) SB entry and
844 * for XTS is already in little endian format so no byte swapping
847 ret = ccp_init_dm_workarea(&ctx, cmd_q,
848 CCP_XTS_AES_CTX_SB_COUNT * CCP_SB_BYTES,
853 ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
854 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
855 CCP_PASSTHRU_BYTESWAP_NOOP);
857 cmd->engine_error = cmd_q->cmd_error;
861 /* Prepare the input and output data workareas. For in-place
862 * operations we need to set the dma direction to BIDIRECTIONAL
863 * and copy the src workarea to the dst workarea.
865 if (sg_virt(xts->src) == sg_virt(xts->dst))
868 ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len,
870 in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
877 ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len,
878 unit_size, DMA_FROM_DEVICE);
883 /* Send data to the CCP AES engine */
884 while (src.sg_wa.bytes_left) {
885 ccp_prepare_data(&src, &dst, &op, unit_size, true);
886 if (!src.sg_wa.bytes_left)
889 ret = cmd_q->ccp->vdata->perform->xts_aes(&op);
891 cmd->engine_error = cmd_q->cmd_error;
895 ccp_process_data(&src, &dst, &op);
898 /* Retrieve the AES context - convert from LE to BE using
899 * 32-byte (256-bit) byteswapping
901 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
902 CCP_PASSTHRU_BYTESWAP_256BIT);
904 cmd->engine_error = cmd_q->cmd_error;
908 /* ...but we only need AES_BLOCK_SIZE bytes */
909 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
910 ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len);
914 ccp_free_data(&dst, cmd_q);
917 ccp_free_data(&src, cmd_q);
928 static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
930 struct ccp_sha_engine *sha = &cmd->u.sha;
931 struct ccp_dm_workarea ctx;
934 unsigned int ioffset, ooffset;
935 unsigned int digest_size;
944 if (sha->ctx_len < SHA1_DIGEST_SIZE)
946 block_size = SHA1_BLOCK_SIZE;
948 case CCP_SHA_TYPE_224:
949 if (sha->ctx_len < SHA224_DIGEST_SIZE)
951 block_size = SHA224_BLOCK_SIZE;
953 case CCP_SHA_TYPE_256:
954 if (sha->ctx_len < SHA256_DIGEST_SIZE)
956 block_size = SHA256_BLOCK_SIZE;
965 if (!sha->final && (sha->src_len & (block_size - 1)))
968 /* The version 3 device can't handle zero-length input */
969 if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {
972 unsigned int digest_len;
975 /* Not final, just return */
979 /* CCP can't do a zero length sha operation so the
980 * caller must buffer the data.
985 /* The CCP cannot perform zero-length sha operations
986 * so the caller is required to buffer data for the
987 * final operation. However, a sha operation for a
988 * message with a total length of zero is valid so
989 * known values are required to supply the result.
993 sha_zero = sha1_zero_message_hash;
994 digest_len = SHA1_DIGEST_SIZE;
996 case CCP_SHA_TYPE_224:
997 sha_zero = sha224_zero_message_hash;
998 digest_len = SHA224_DIGEST_SIZE;
1000 case CCP_SHA_TYPE_256:
1001 sha_zero = sha256_zero_message_hash;
1002 digest_len = SHA256_DIGEST_SIZE;
1008 scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0,
1015 /* Set variables used throughout */
1016 switch (sha->type) {
1017 case CCP_SHA_TYPE_1:
1018 digest_size = SHA1_DIGEST_SIZE;
1019 init = (void *) ccp_sha1_init;
1020 ctx_size = SHA1_DIGEST_SIZE;
1022 if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
1023 ooffset = ioffset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
1025 ooffset = ioffset = 0;
1027 case CCP_SHA_TYPE_224:
1028 digest_size = SHA224_DIGEST_SIZE;
1029 init = (void *) ccp_sha224_init;
1030 ctx_size = SHA256_DIGEST_SIZE;
1033 if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
1034 ooffset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
1038 case CCP_SHA_TYPE_256:
1039 digest_size = SHA256_DIGEST_SIZE;
1040 init = (void *) ccp_sha256_init;
1041 ctx_size = SHA256_DIGEST_SIZE;
1043 ooffset = ioffset = 0;
1050 /* For zero-length plaintext the src pointer is ignored;
1051 * otherwise both parts must be valid
1053 if (sha->src_len && !sha->src)
1056 memset(&op, 0, sizeof(op));
1058 op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
1059 op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
1060 op.u.sha.type = sha->type;
1061 op.u.sha.msg_bits = sha->msg_bits;
1063 ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES,
1068 switch (sha->type) {
1069 case CCP_SHA_TYPE_1:
1070 case CCP_SHA_TYPE_224:
1071 case CCP_SHA_TYPE_256:
1072 memcpy(ctx.address + ioffset, init, ctx_size);
1079 /* Restore the context */
1080 ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
1081 sb_count * CCP_SB_BYTES);
1084 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
1085 CCP_PASSTHRU_BYTESWAP_256BIT);
1087 cmd->engine_error = cmd_q->cmd_error;
1092 /* Send data to the CCP SHA engine; block_size is set above */
1093 ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len,
1094 block_size, DMA_TO_DEVICE);
1098 while (src.sg_wa.bytes_left) {
1099 ccp_prepare_data(&src, NULL, &op, block_size, false);
1100 if (sha->final && !src.sg_wa.bytes_left)
1103 ret = cmd_q->ccp->vdata->perform->sha(&op);
1105 cmd->engine_error = cmd_q->cmd_error;
1109 ccp_process_data(&src, NULL, &op);
1113 ret = cmd_q->ccp->vdata->perform->sha(&op);
1115 cmd->engine_error = cmd_q->cmd_error;
1120 /* Retrieve the SHA context - convert from LE to BE using
1121 * 32-byte (256-bit) byteswapping to BE
1123 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
1124 CCP_PASSTHRU_BYTESWAP_256BIT);
1126 cmd->engine_error = cmd_q->cmd_error;
1131 /* Finishing up, so get the digest */
1132 switch (sha->type) {
1133 case CCP_SHA_TYPE_1:
1134 case CCP_SHA_TYPE_224:
1135 case CCP_SHA_TYPE_256:
1136 ccp_get_dm_area(&ctx, ooffset,
1145 /* Stash the context */
1146 ccp_get_dm_area(&ctx, 0, sha->ctx, 0,
1147 sb_count * CCP_SB_BYTES);
1150 if (sha->final && sha->opad) {
1151 /* HMAC operation, recursively perform final SHA */
1152 struct ccp_cmd hmac_cmd;
1153 struct scatterlist sg;
1156 if (sha->opad_len != block_size) {
1161 hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL);
1166 sg_init_one(&sg, hmac_buf, block_size + digest_size);
1168 scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0);
1169 switch (sha->type) {
1170 case CCP_SHA_TYPE_1:
1171 case CCP_SHA_TYPE_224:
1172 case CCP_SHA_TYPE_256:
1173 memcpy(hmac_buf + block_size,
1174 ctx.address + ooffset,
1182 memset(&hmac_cmd, 0, sizeof(hmac_cmd));
1183 hmac_cmd.engine = CCP_ENGINE_SHA;
1184 hmac_cmd.u.sha.type = sha->type;
1185 hmac_cmd.u.sha.ctx = sha->ctx;
1186 hmac_cmd.u.sha.ctx_len = sha->ctx_len;
1187 hmac_cmd.u.sha.src = &sg;
1188 hmac_cmd.u.sha.src_len = block_size + digest_size;
1189 hmac_cmd.u.sha.opad = NULL;
1190 hmac_cmd.u.sha.opad_len = 0;
1191 hmac_cmd.u.sha.first = 1;
1192 hmac_cmd.u.sha.final = 1;
1193 hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3;
1195 ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd);
1197 cmd->engine_error = hmac_cmd.engine_error;
1204 ccp_free_data(&src, cmd_q);
1212 static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1214 struct ccp_rsa_engine *rsa = &cmd->u.rsa;
1215 struct ccp_dm_workarea exp, src;
1216 struct ccp_data dst;
1218 unsigned int sb_count, i_len, o_len;
1221 if (rsa->key_size > CCP_RSA_MAX_WIDTH)
1224 if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
1227 /* The RSA modulus must precede the message being acted upon, so
1228 * it must be copied to a DMA area where the message and the
1229 * modulus can be concatenated. Therefore the input buffer
1230 * length required is twice the output buffer length (which
1231 * must be a multiple of 256-bits).
1233 o_len = ((rsa->key_size + 255) / 256) * 32;
1236 sb_count = o_len / CCP_SB_BYTES;
1238 memset(&op, 0, sizeof(op));
1240 op.jobid = ccp_gen_jobid(cmd_q->ccp);
1241 op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q, sb_count);
1246 /* The RSA exponent may span multiple (32-byte) SB entries and must
1247 * be in little endian format. Reverse copy each 32-byte chunk
1248 * of the exponent (En chunk to E0 chunk, E(n-1) chunk to E1 chunk)
1249 * and each byte within that chunk and do not perform any byte swap
1250 * operations on the passthru operation.
1252 ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
1256 ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0, rsa->exp_len);
1259 ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
1260 CCP_PASSTHRU_BYTESWAP_NOOP);
1262 cmd->engine_error = cmd_q->cmd_error;
1266 /* Concatenate the modulus and the message. Both the modulus and
1267 * the operands must be in little endian format. Since the input
1268 * is in big endian format it must be converted.
1270 ret = ccp_init_dm_workarea(&src, cmd_q, i_len, DMA_TO_DEVICE);
1274 ret = ccp_reverse_set_dm_area(&src, 0, rsa->mod, 0, rsa->mod_len);
1277 ret = ccp_reverse_set_dm_area(&src, o_len, rsa->src, 0, rsa->src_len);
1281 /* Prepare the output area for the operation */
1282 ret = ccp_init_data(&dst, cmd_q, rsa->dst, rsa->mod_len,
1283 o_len, DMA_FROM_DEVICE);
1288 op.src.u.dma.address = src.dma.address;
1289 op.src.u.dma.offset = 0;
1290 op.src.u.dma.length = i_len;
1291 op.dst.u.dma.address = dst.dm_wa.dma.address;
1292 op.dst.u.dma.offset = 0;
1293 op.dst.u.dma.length = o_len;
1295 op.u.rsa.mod_size = rsa->key_size;
1296 op.u.rsa.input_len = i_len;
1298 ret = cmd_q->ccp->vdata->perform->rsa(&op);
1300 cmd->engine_error = cmd_q->cmd_error;
1304 ccp_reverse_get_dm_area(&dst.dm_wa, 0, rsa->dst, 0, rsa->mod_len);
1307 ccp_free_data(&dst, cmd_q);
1316 cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);
1321 static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
1322 struct ccp_cmd *cmd)
1324 struct ccp_passthru_engine *pt = &cmd->u.passthru;
1325 struct ccp_dm_workarea mask;
1326 struct ccp_data src, dst;
1328 bool in_place = false;
1332 if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
1335 if (!pt->src || !pt->dst)
1338 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
1339 if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
1345 BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);
1347 memset(&op, 0, sizeof(op));
1349 op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
1351 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
1353 op.sb_key = cmd_q->sb_key;
1355 ret = ccp_init_dm_workarea(&mask, cmd_q,
1356 CCP_PASSTHRU_SB_COUNT *
1362 ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
1363 ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
1364 CCP_PASSTHRU_BYTESWAP_NOOP);
1366 cmd->engine_error = cmd_q->cmd_error;
1371 /* Prepare the input and output data workareas. For in-place
1372 * operations we need to set the dma direction to BIDIRECTIONAL
1373 * and copy the src workarea to the dst workarea.
1375 if (sg_virt(pt->src) == sg_virt(pt->dst))
1378 ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len,
1379 CCP_PASSTHRU_MASKSIZE,
1380 in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1387 ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len,
1388 CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE);
1393 /* Send data to the CCP Passthru engine
1394 * Because the CCP engine works on a single source and destination
1395 * dma address at a time, each entry in the source scatterlist
1396 * (after the dma_map_sg call) must be less than or equal to the
1397 * (remaining) length in the destination scatterlist entry and the
1398 * length must be a multiple of CCP_PASSTHRU_BLOCKSIZE
1400 dst.sg_wa.sg_used = 0;
1401 for (i = 1; i <= src.sg_wa.dma_count; i++) {
1402 if (!dst.sg_wa.sg ||
1403 (dst.sg_wa.sg->length < src.sg_wa.sg->length)) {
1408 if (i == src.sg_wa.dma_count) {
1413 op.src.type = CCP_MEMTYPE_SYSTEM;
1414 op.src.u.dma.address = sg_dma_address(src.sg_wa.sg);
1415 op.src.u.dma.offset = 0;
1416 op.src.u.dma.length = sg_dma_len(src.sg_wa.sg);
1418 op.dst.type = CCP_MEMTYPE_SYSTEM;
1419 op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg);
1420 op.dst.u.dma.offset = dst.sg_wa.sg_used;
1421 op.dst.u.dma.length = op.src.u.dma.length;
1423 ret = cmd_q->ccp->vdata->perform->passthru(&op);
1425 cmd->engine_error = cmd_q->cmd_error;
1429 dst.sg_wa.sg_used += src.sg_wa.sg->length;
1430 if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) {
1431 dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
1432 dst.sg_wa.sg_used = 0;
1434 src.sg_wa.sg = sg_next(src.sg_wa.sg);
1439 ccp_free_data(&dst, cmd_q);
1442 ccp_free_data(&src, cmd_q);
1445 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
1451 static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
1452 struct ccp_cmd *cmd)
1454 struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap;
1455 struct ccp_dm_workarea mask;
1459 if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
1462 if (!pt->src_dma || !pt->dst_dma)
1465 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
1466 if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
1472 BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);
1474 memset(&op, 0, sizeof(op));
1476 op.jobid = ccp_gen_jobid(cmd_q->ccp);
1478 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
1480 op.sb_key = cmd_q->sb_key;
1482 mask.length = pt->mask_len;
1483 mask.dma.address = pt->mask;
1484 mask.dma.length = pt->mask_len;
1486 ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
1487 CCP_PASSTHRU_BYTESWAP_NOOP);
1489 cmd->engine_error = cmd_q->cmd_error;
1494 /* Send data to the CCP Passthru engine */
1498 op.src.type = CCP_MEMTYPE_SYSTEM;
1499 op.src.u.dma.address = pt->src_dma;
1500 op.src.u.dma.offset = 0;
1501 op.src.u.dma.length = pt->src_len;
1503 op.dst.type = CCP_MEMTYPE_SYSTEM;
1504 op.dst.u.dma.address = pt->dst_dma;
1505 op.dst.u.dma.offset = 0;
1506 op.dst.u.dma.length = pt->src_len;
1508 ret = cmd_q->ccp->vdata->perform->passthru(&op);
1510 cmd->engine_error = cmd_q->cmd_error;
1515 static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1517 struct ccp_ecc_engine *ecc = &cmd->u.ecc;
1518 struct ccp_dm_workarea src, dst;
1523 if (!ecc->u.mm.operand_1 ||
1524 (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES))
1527 if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT)
1528 if (!ecc->u.mm.operand_2 ||
1529 (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES))
1532 if (!ecc->u.mm.result ||
1533 (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES))
1536 memset(&op, 0, sizeof(op));
1538 op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
1540 /* Concatenate the modulus and the operands. Both the modulus and
1541 * the operands must be in little endian format. Since the input
1542 * is in big endian format it must be converted and placed in a
1543 * fixed length buffer.
1545 ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
1550 /* Save the workarea address since it is updated in order to perform
1555 /* Copy the ECC modulus */
1556 ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
1559 src.address += CCP_ECC_OPERAND_SIZE;
1561 /* Copy the first operand */
1562 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_1, 0,
1563 ecc->u.mm.operand_1_len);
1566 src.address += CCP_ECC_OPERAND_SIZE;
1568 if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
1569 /* Copy the second operand */
1570 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_2, 0,
1571 ecc->u.mm.operand_2_len);
1574 src.address += CCP_ECC_OPERAND_SIZE;
1577 /* Restore the workarea address */
1580 /* Prepare the output area for the operation */
1581 ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
1587 op.src.u.dma.address = src.dma.address;
1588 op.src.u.dma.offset = 0;
1589 op.src.u.dma.length = src.length;
1590 op.dst.u.dma.address = dst.dma.address;
1591 op.dst.u.dma.offset = 0;
1592 op.dst.u.dma.length = dst.length;
1594 op.u.ecc.function = cmd->u.ecc.function;
1596 ret = cmd_q->ccp->vdata->perform->ecc(&op);
1598 cmd->engine_error = cmd_q->cmd_error;
1602 ecc->ecc_result = le16_to_cpup(
1603 (const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
1604 if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
1609 /* Save the ECC result */
1610 ccp_reverse_get_dm_area(&dst, 0, ecc->u.mm.result, 0,
1611 CCP_ECC_MODULUS_BYTES);
1622 static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1624 struct ccp_ecc_engine *ecc = &cmd->u.ecc;
1625 struct ccp_dm_workarea src, dst;
1630 if (!ecc->u.pm.point_1.x ||
1631 (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) ||
1632 !ecc->u.pm.point_1.y ||
1633 (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES))
1636 if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
1637 if (!ecc->u.pm.point_2.x ||
1638 (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) ||
1639 !ecc->u.pm.point_2.y ||
1640 (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES))
1643 if (!ecc->u.pm.domain_a ||
1644 (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES))
1647 if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT)
1648 if (!ecc->u.pm.scalar ||
1649 (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES))
1653 if (!ecc->u.pm.result.x ||
1654 (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) ||
1655 !ecc->u.pm.result.y ||
1656 (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES))
1659 memset(&op, 0, sizeof(op));
1661 op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
1663 /* Concatenate the modulus and the operands. Both the modulus and
1664 * the operands must be in little endian format. Since the input
1665 * is in big endian format it must be converted and placed in a
1666 * fixed length buffer.
1668 ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
1673 /* Save the workarea address since it is updated in order to perform
1678 /* Copy the ECC modulus */
1679 ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
1682 src.address += CCP_ECC_OPERAND_SIZE;
1684 /* Copy the first point X and Y coordinate */
1685 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.x, 0,
1686 ecc->u.pm.point_1.x_len);
1689 src.address += CCP_ECC_OPERAND_SIZE;
1690 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.y, 0,
1691 ecc->u.pm.point_1.y_len);
1694 src.address += CCP_ECC_OPERAND_SIZE;
1696 /* Set the first point Z coordinate to 1 */
1697 *src.address = 0x01;
1698 src.address += CCP_ECC_OPERAND_SIZE;
1700 if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
1701 /* Copy the second point X and Y coordinate */
1702 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.x, 0,
1703 ecc->u.pm.point_2.x_len);
1706 src.address += CCP_ECC_OPERAND_SIZE;
1707 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.y, 0,
1708 ecc->u.pm.point_2.y_len);
1711 src.address += CCP_ECC_OPERAND_SIZE;
1713 /* Set the second point Z coordinate to 1 */
1714 *src.address = 0x01;
1715 src.address += CCP_ECC_OPERAND_SIZE;
1717 /* Copy the Domain "a" parameter */
1718 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.domain_a, 0,
1719 ecc->u.pm.domain_a_len);
1722 src.address += CCP_ECC_OPERAND_SIZE;
1724 if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
1725 /* Copy the scalar value */
1726 ret = ccp_reverse_set_dm_area(&src, 0,
1727 ecc->u.pm.scalar, 0,
1728 ecc->u.pm.scalar_len);
1731 src.address += CCP_ECC_OPERAND_SIZE;
1735 /* Restore the workarea address */
1738 /* Prepare the output area for the operation */
1739 ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
1745 op.src.u.dma.address = src.dma.address;
1746 op.src.u.dma.offset = 0;
1747 op.src.u.dma.length = src.length;
1748 op.dst.u.dma.address = dst.dma.address;
1749 op.dst.u.dma.offset = 0;
1750 op.dst.u.dma.length = dst.length;
1752 op.u.ecc.function = cmd->u.ecc.function;
1754 ret = cmd_q->ccp->vdata->perform->ecc(&op);
1756 cmd->engine_error = cmd_q->cmd_error;
1760 ecc->ecc_result = le16_to_cpup(
1761 (const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
1762 if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
1767 /* Save the workarea address since it is updated as we walk through
1768 * to copy the point math result
1772 /* Save the ECC result X and Y coordinates */
1773 ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.x, 0,
1774 CCP_ECC_MODULUS_BYTES);
1775 dst.address += CCP_ECC_OUTPUT_SIZE;
1776 ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.y, 0,
1777 CCP_ECC_MODULUS_BYTES);
1778 dst.address += CCP_ECC_OUTPUT_SIZE;
1780 /* Restore the workarea address */
1792 static int ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1794 struct ccp_ecc_engine *ecc = &cmd->u.ecc;
1796 ecc->ecc_result = 0;
1799 (ecc->mod_len > CCP_ECC_MODULUS_BYTES))
1802 switch (ecc->function) {
1803 case CCP_ECC_FUNCTION_MMUL_384BIT:
1804 case CCP_ECC_FUNCTION_MADD_384BIT:
1805 case CCP_ECC_FUNCTION_MINV_384BIT:
1806 return ccp_run_ecc_mm_cmd(cmd_q, cmd);
1808 case CCP_ECC_FUNCTION_PADD_384BIT:
1809 case CCP_ECC_FUNCTION_PMUL_384BIT:
1810 case CCP_ECC_FUNCTION_PDBL_384BIT:
1811 return ccp_run_ecc_pm_cmd(cmd_q, cmd);
1818 int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1822 cmd->engine_error = 0;
1823 cmd_q->cmd_error = 0;
1824 cmd_q->int_rcvd = 0;
1825 cmd_q->free_slots = cmd_q->ccp->vdata->perform->get_free_slots(cmd_q);
1827 switch (cmd->engine) {
1828 case CCP_ENGINE_AES:
1829 ret = ccp_run_aes_cmd(cmd_q, cmd);
1831 case CCP_ENGINE_XTS_AES_128:
1832 ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
1834 case CCP_ENGINE_SHA:
1835 ret = ccp_run_sha_cmd(cmd_q, cmd);
1837 case CCP_ENGINE_RSA:
1838 ret = ccp_run_rsa_cmd(cmd_q, cmd);
1840 case CCP_ENGINE_PASSTHRU:
1841 if (cmd->flags & CCP_CMD_PASSTHRU_NO_DMA_MAP)
1842 ret = ccp_run_passthru_nomap_cmd(cmd_q, cmd);
1844 ret = ccp_run_passthru_cmd(cmd_q, cmd);
1846 case CCP_ENGINE_ECC:
1847 ret = ccp_run_ecc_cmd(cmd_q, cmd);