// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cipher algorithms supported by the CESA: DES, 3DES and AES.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include <crypto/aes.h>
#include <crypto/des.h>

#include "cesa.h"

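/*
 * Each algorithm below can be handled through two paths: a TDMA path
 * that chains DMA descriptors when the engine has a TDMA unit, and a
 * standard (PIO) path that copies data through the engine SRAM chunk
 * by chunk.
 */
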
struct mv_cesa_des_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES_KEY_SIZE];
};

struct mv_cesa_des3_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES3_EDE_KEY_SIZE];
};

struct mv_cesa_aes_ctx {
	struct mv_cesa_ctx base;
	struct crypto_aes_ctx aes;
};

struct mv_cesa_skcipher_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
	struct mv_cesa_sg_dma_iter dst;
};

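/*
 * Helpers to walk the request in op-sized chunks on the TDMA path;
 * the per-op offsets of the src and dst iterators are reset at each
 * new op.
 */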
static inline void
mv_cesa_skcipher_req_iter_init(struct mv_cesa_skcipher_dma_iter *iter,
			       struct skcipher_request *req)
{
	mv_cesa_req_dma_iter_init(&iter->base, req->cryptlen);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
}

static inline bool
mv_cesa_skcipher_req_iter_next_op(struct mv_cesa_skcipher_dma_iter *iter)
{
	iter->src.op_offset = 0;
	iter->dst.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline void
mv_cesa_skcipher_dma_cleanup(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);

	if (req->dst != req->src) {
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_TO_DEVICE);
	} else {
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_BIDIRECTIONAL);
	}
	mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_skcipher_cleanup(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_skcipher_dma_cleanup(req);
}

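/*
 * PIO path: process the request in CESA_SA_SRAM_PAYLOAD_SIZE chunks.
 * Each step copies the operation descriptor and the next chunk of
 * input data into the engine SRAM, then starts the accelerator.
 */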
static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
	struct mv_cesa_engine *engine = creq->base.engine;
	size_t len = min_t(size_t, req->cryptlen - sreq->offset,
			   CESA_SA_SRAM_PAYLOAD_SIZE);

	mv_cesa_adjust_op(engine, &sreq->op);
	memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));

	len = sg_pcopy_to_buffer(req->src, creq->src_nents,
				 engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				 len, sreq->offset);

	sreq->size = len;
	mv_cesa_set_crypt_op_len(&sreq->op, len);

	/* FIXME: only update enc_len field */
	if (!sreq->skip_ctx) {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
		sreq->skip_ctx = true;
	} else {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op.desc));
	}

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	BUG_ON(readl(engine->regs + CESA_SA_CMD) &
	       CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

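/*
 * PIO completion: copy the processed chunk back from SRAM and advance
 * the offset; -EINPROGRESS tells the caller another step is needed.
 */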
static int mv_cesa_skcipher_std_process(struct skcipher_request *req,
					u32 status)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
	struct mv_cesa_engine *engine = creq->base.engine;
	size_t len;

	len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
				   engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				   sreq->size, sreq->offset);

	sreq->offset += len;
	if (sreq->offset < req->cryptlen)
		return -EINPROGRESS;

	return 0;
}

static int mv_cesa_skcipher_process(struct crypto_async_request *req,
				    u32 status)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
	struct mv_cesa_req *basereq = &creq->base;

	if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ)
		return mv_cesa_skcipher_std_process(skreq, status);

	return mv_cesa_dma_process(basereq, status);
}

static void mv_cesa_skcipher_step(struct crypto_async_request *req)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_dma_step(&creq->base);
	else
		mv_cesa_skcipher_std_step(skreq);
}

static inline void
mv_cesa_skcipher_dma_prepare(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_req *basereq = &creq->base;

	mv_cesa_dma_prepare(basereq, basereq->engine);
}

static inline void
mv_cesa_skcipher_std_prepare(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;

	sreq->size = 0;
	sreq->offset = 0;
}

static inline void mv_cesa_skcipher_prepare(struct crypto_async_request *req,
					    struct mv_cesa_engine *engine)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);

	creq->base.engine = engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_skcipher_dma_prepare(skreq);
	else
		mv_cesa_skcipher_std_prepare(skreq);
}

static inline void
mv_cesa_skcipher_req_cleanup(struct crypto_async_request *req)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);

	mv_cesa_skcipher_cleanup(skreq);
}

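/*
 * On completion, propagate the output IV back into the request: from
 * the last TDMA op context in DMA mode, or straight from the engine
 * SRAM in PIO mode.
 */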
static void
mv_cesa_skcipher_complete(struct crypto_async_request *req)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int ivsize;

	atomic_sub(skreq->cryptlen, &engine->load);
	ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(skreq));

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
		struct mv_cesa_req *basereq;

		basereq = &creq->base;
		memcpy(skreq->iv, basereq->chain.last->op->ctx.blkcipher.iv,
		       ivsize);
	} else {
		memcpy_fromio(skreq->iv,
			      engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
			      ivsize);
	}
}

static const struct mv_cesa_req_ops mv_cesa_skcipher_req_ops = {
	.step = mv_cesa_skcipher_step,
	.process = mv_cesa_skcipher_process,
	.cleanup = mv_cesa_skcipher_req_cleanup,
	.complete = mv_cesa_skcipher_complete,
};

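/*
 * Wipe the whole transform context (including key material) when the
 * transform is destroyed.
 */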
static void mv_cesa_skcipher_cra_exit(struct crypto_tfm *tfm)
{
	void *ctx = crypto_tfm_ctx(tfm);

	memzero_explicit(ctx, tfm->__crt_alg->cra_ctxsize);
}

static int mv_cesa_skcipher_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->ops = &mv_cesa_skcipher_req_ops;

	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct mv_cesa_skcipher_req));

	return 0;
}

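/*
 * Expand the AES key schedules for the engine. Beyond the AES-128
 * portion, key_dec is seeded from the tail of the encryption schedule,
 * presumably because the engine derives the remaining inverse round
 * keys itself.
 */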
static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	int remaining;
	int offset;
	int ret;
	int i;

	ret = crypto_aes_expand_key(&ctx->aes, key, len);
	if (ret) {
		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

	remaining = (ctx->aes.key_length - 16) / 4;
	offset = ctx->aes.key_length + 24 - remaining;
	for (i = 0; i < remaining; i++)
		ctx->aes.key_dec[4 + i] =
			cpu_to_le32(ctx->aes.key_enc[offset + i]);

	return 0;
}

static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 tmp[DES_EXPKEY_WORDS];
	int ret;

	if (len != DES_KEY_SIZE) {
		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ret = des_ekey(tmp, key);
	if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}

	memcpy(ctx->key, key, DES_KEY_SIZE);

	return 0;
}

static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
				   const u8 *key, unsigned int len)
{
	struct mv_cesa_des3_ctx *ctx = crypto_skcipher_ctx(cipher);
	int err;

	err = des3_verify_key(cipher, key);
	if (unlikely(err))
		return err;

	memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);

	return 0;
}

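/*
 * TDMA path: map the scatterlists, then build one descriptor chain
 * covering the whole request. Each SRAM-sized chunk gets an op
 * descriptor, its input transfers, a dummy descriptor to launch the
 * engine, and its output transfers; a final result op fetches the
 * output IV.
 */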
static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
					 const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_req *basereq = &creq->base;
	struct mv_cesa_skcipher_dma_iter iter;
	bool skip_ctx = false;
	int ret;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	if (req->src != req->dst) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret)
			return -ENOMEM;

		ret = dma_map_sg(cesa_dev->dev, req->dst, creq->dst_nents,
				 DMA_FROM_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err_unmap_src;
		}
	} else {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_BIDIRECTIONAL);
		if (!ret)
			return -ENOMEM;
	}

	mv_cesa_tdma_desc_iter_init(&basereq->chain);
	mv_cesa_skcipher_req_iter_init(&iter, req);

	do {
		struct mv_cesa_op_ctx *op;

		op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx,
					flags);
		if (IS_ERR(op)) {
			ret = PTR_ERR(op);
			goto err_free_tdma;
		}
		skip_ctx = true;

		mv_cesa_set_crypt_op_len(op, iter.base.op_len);

		/* Add input transfers */
		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
						   &iter.src, flags);
		if (ret)
			goto err_free_tdma;

		/* Add dummy desc to launch the crypto operation */
		ret = mv_cesa_dma_add_dummy_launch(&basereq->chain, flags);
		if (ret)
			goto err_free_tdma;

		/* Add output transfers */
		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
						   &iter.dst, flags);
		if (ret)
			goto err_free_tdma;
	} while (mv_cesa_skcipher_req_iter_next_op(&iter));

	/* Add output data for IV */
	ret = mv_cesa_dma_add_result_op(&basereq->chain,
					CESA_SA_CFG_SRAM_OFFSET,
					CESA_SA_DATA_SRAM_OFFSET,
					CESA_TDMA_SRC_IN_SRAM, flags);
	if (ret)
		goto err_free_tdma;

	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(basereq);
	if (req->dst != req->src)
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);

err_unmap_src:
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
		     req->dst != req->src ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

	return ret;
}

static inline int
mv_cesa_skcipher_std_req_init(struct skcipher_request *req,
			      const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
	struct mv_cesa_req *basereq = &creq->base;

	sreq->op = *op_templ;
	sreq->skip_ctx = false;
	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	return 0;
}

static int mv_cesa_skcipher_req_init(struct skcipher_request *req,
				     struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned int blksize = crypto_skcipher_blocksize(tfm);
	int ret;

	if (!IS_ALIGNED(req->cryptlen, blksize))
		return -EINVAL;

	creq->src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG");
		return creq->src_nents;
	}
	creq->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
	if (creq->dst_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of dst SG");
		return creq->dst_nents;
	}

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
			      CESA_SA_DESC_CFG_OP_MSK);

	if (cesa_dev->caps->has_tdma)
		ret = mv_cesa_skcipher_dma_req_init(req, tmpl);
	else
		ret = mv_cesa_skcipher_std_req_init(req, tmpl);

	return ret;
}

static int mv_cesa_skcipher_queue_req(struct skcipher_request *req,
				      struct mv_cesa_op_ctx *tmpl)
{
	int ret;
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_engine *engine;

	ret = mv_cesa_skcipher_req_init(req, tmpl);
	if (ret)
		return ret;

	engine = mv_cesa_select_engine(req->cryptlen);
	mv_cesa_skcipher_prepare(&req->base, engine);

	ret = mv_cesa_queue_req(&req->base, &creq->base);

	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_skcipher_cleanup(req);

	return ret;
}

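/*
 * The wrappers below only fill an op template (cipher, mode, direction,
 * key/IV) and hand it to mv_cesa_skcipher_queue_req(). A minimal user
 * of the resulting "cbc(aes)" alg, sketched here for illustration only
 * (error and async-completion handling omitted), would look like:
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, len, iv);
 *	crypto_skcipher_encrypt(req);
 */
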
static int mv_cesa_des_op(struct skcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE);

	return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des_op(req, &tmpl);
}

static int mv_cesa_ecb_des_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_des_alg = {
	.setkey = mv_cesa_des_setkey,
	.encrypt = mv_cesa_ecb_des_encrypt,
	.decrypt = mv_cesa_ecb_des_decrypt,
	.min_keysize = DES_KEY_SIZE,
	.max_keysize = DES_KEY_SIZE,
	.base = {
		.cra_name = "ecb(des)",
		.cra_driver_name = "mv-ecb-des",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_cbc_des_op(struct skcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);

	memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES_BLOCK_SIZE);

	return mv_cesa_des_op(req, tmpl);
}

static int mv_cesa_cbc_des_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

static int mv_cesa_cbc_des_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_des_alg = {
	.setkey = mv_cesa_des_setkey,
	.encrypt = mv_cesa_cbc_des_encrypt,
	.decrypt = mv_cesa_cbc_des_decrypt,
	.min_keysize = DES_KEY_SIZE,
	.max_keysize = DES_KEY_SIZE,
	.ivsize = DES_BLOCK_SIZE,
	.base = {
		.cra_name = "cbc(des)",
		.cra_driver_name = "mv-cbc-des",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_des3_op(struct skcipher_request *req,
			   struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE);

	return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des3_ede_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des3_op(req, &tmpl);
}

static int mv_cesa_ecb_des3_ede_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des3_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_des3_ede_alg = {
	.setkey = mv_cesa_des3_ede_setkey,
	.encrypt = mv_cesa_ecb_des3_ede_encrypt,
	.decrypt = mv_cesa_ecb_des3_ede_decrypt,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.base = {
		.cra_name = "ecb(des3_ede)",
		.cra_driver_name = "mv-ecb-des3-ede",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_cbc_des3_op(struct skcipher_request *req,
			       struct mv_cesa_op_ctx *tmpl)
{
	memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES3_EDE_BLOCK_SIZE);

	return mv_cesa_des3_op(req, tmpl);
}

static int mv_cesa_cbc_des3_ede_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}

static int mv_cesa_cbc_des3_ede_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_des3_ede_alg = {
	.setkey = mv_cesa_des3_ede_setkey,
	.encrypt = mv_cesa_cbc_des3_ede_encrypt,
	.decrypt = mv_cesa_cbc_des3_ede_decrypt,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = DES3_EDE_BLOCK_SIZE,
	.base = {
		.cra_name = "cbc(des3_ede)",
		.cra_driver_name = "mv-cbc-des3-ede",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

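/*
 * Per-request AES setup: pick the encryption or decryption schedule
 * based on the direction bit in the template, copy it into the op
 * context, and encode the key length (128/192/256) in the config word.
 */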
static int mv_cesa_aes_op(struct skcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	int i;
	u32 *key;
	u32 cfg;

	cfg = CESA_SA_DESC_CFG_CRYPTM_AES;

	if (mv_cesa_get_op_cfg(tmpl) & CESA_SA_DESC_CFG_DIR_DEC)
		key = ctx->aes.key_dec;
	else
		key = ctx->aes.key_enc;

	for (i = 0; i < ctx->aes.key_length / sizeof(u32); i++)
		tmpl->ctx.blkcipher.key[i] = cpu_to_le32(key[i]);

	if (ctx->aes.key_length == 24)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_192;
	else if (ctx->aes.key_length == 32)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_256;

	mv_cesa_update_op_cfg(tmpl, cfg,
			      CESA_SA_DESC_CFG_CRYPTM_MSK |
			      CESA_SA_DESC_CFG_AES_LEN_MSK);

	return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_aes_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_aes_op(req, &tmpl);
}

static int mv_cesa_ecb_aes_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_aes_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_aes_alg = {
	.setkey = mv_cesa_aes_setkey,
	.encrypt = mv_cesa_ecb_aes_encrypt,
	.decrypt = mv_cesa_ecb_aes_decrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.base = {
		.cra_name = "ecb(aes)",
		.cra_driver_name = "mv-ecb-aes",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_cbc_aes_op(struct skcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);
	memcpy(tmpl->ctx.blkcipher.iv, req->iv, AES_BLOCK_SIZE);

	return mv_cesa_aes_op(req, tmpl);
}

static int mv_cesa_cbc_aes_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

static int mv_cesa_cbc_aes_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_aes_alg = {
	.setkey = mv_cesa_aes_setkey,
	.encrypt = mv_cesa_cbc_aes_encrypt,
	.decrypt = mv_cesa_cbc_aes_decrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.base = {
		.cra_name = "cbc(aes)",
		.cra_driver_name = "mv-cbc-aes",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};