// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021, Linaro Limited. All rights reserved.
 */
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <crypto/gcm.h>
#include <crypto/authenc.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/des.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/scatterwalk.h>
#include "aead.h"
#define CCM_NONCE_ADATA_SHIFT		6
#define CCM_NONCE_AUTHSIZE_SHIFT	3
#define MAX_CCM_ADATA_HEADER_LEN	6
static LIST_HEAD(aead_algs);
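
/*
 * Completion callback invoked by the DMA layer when a queued AEAD request
 * finishes. It unmaps the DMA buffers and frees the scatterlist tables set
 * up in the prepare path, then copies out the computed MAC on encryption
 * or, for non-CCM decryption, compares the computed MAC against the tag
 * carried in the source buffer.
 */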
static void qce_aead_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct aead_request *req = aead_request_cast(async_req);
	struct qce_aead_reqctx *rctx = aead_request_ctx(req);
	struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
	struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	struct qce_result_dump *result_buf = qce->dma.result_buf;
	enum dma_data_direction dir_src, dir_dst;
	bool diff_dst;
	int error;
	u32 status;
	unsigned int totallen;
	unsigned char tag[SHA256_DIGEST_SIZE] = {0};
	int ret = 0;

	diff_dst = (req->src != req->dst);
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "aead dma termination error (%d)\n",
			error);
	if (diff_dst)
		dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);

	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);

	if (IS_CCM(rctx->flags)) {
		if (req->assoclen) {
			sg_free_table(&rctx->src_tbl);
			if (diff_dst)
				sg_free_table(&rctx->dst_tbl);
		} else {
			if (!(IS_DECRYPT(rctx->flags) && !diff_dst))
				sg_free_table(&rctx->dst_tbl);
		}
	} else {
		sg_free_table(&rctx->dst_tbl);
	}

	error = qce_check_status(qce, &status);
	if (error < 0 && (error != -EBADMSG))
		dev_err(qce->dev, "aead operation error (%x)\n", status);

	if (IS_ENCRYPT(rctx->flags)) {
		totallen = req->cryptlen + req->assoclen;
		if (IS_CCM(rctx->flags))
			scatterwalk_map_and_copy(rctx->ccmresult_buf, req->dst,
						 totallen, ctx->authsize, 1);
		else
			scatterwalk_map_and_copy(result_buf->auth_iv, req->dst,
						 totallen, ctx->authsize, 1);
	} else if (!IS_CCM(rctx->flags)) {
		totallen = req->cryptlen + req->assoclen - ctx->authsize;
		scatterwalk_map_and_copy(tag, req->src, totallen, ctx->authsize, 0);
		ret = memcmp(result_buf->auth_iv, tag, ctx->authsize);
		if (ret) {
			pr_err("Bad message error\n");
			error = -EBADMSG;
		}
	}

	qce->async_req_done(qce, error);
}

static struct scatterlist *
qce_aead_prepare_result_buf(struct sg_table *tbl, struct aead_request *req)
{
	struct qce_aead_reqctx *rctx = aead_request_ctx(req);
	struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
	struct qce_device *qce = tmpl->qce;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
	return qce_sgtable_add(tbl, &rctx->result_sg, QCE_RESULT_BUF_SZ);
}

static struct scatterlist *
qce_aead_prepare_ccm_result_buf(struct sg_table *tbl, struct aead_request *req)
{
	struct qce_aead_reqctx *rctx = aead_request_ctx(req);

	sg_init_one(&rctx->result_sg, rctx->ccmresult_buf, QCE_BAM_BURST_SIZE);
	return qce_sgtable_add(tbl, &rctx->result_sg, QCE_BAM_BURST_SIZE);
}

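/*
 * Build the destination scatterlist handed to the engine. On top of the
 * caller's dst buffer this reserves one extra entry for the result dump
 * (two for CCM, where the formatted associated data is also emitted as a
 * separate entry).
 */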
static struct scatterlist *
qce_aead_prepare_dst_buf(struct aead_request *req)
{
	struct qce_aead_reqctx *rctx = aead_request_ctx(req);
	struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	struct scatterlist *sg, *msg_sg, __sg[2];
	gfp_t gfp;
	unsigned int assoclen = req->assoclen;
	unsigned int totallen;
	int ret;

	totallen = rctx->cryptlen + assoclen;
	rctx->dst_nents = sg_nents_for_len(req->dst, totallen);
	if (rctx->dst_nents < 0) {
		dev_err(qce->dev, "Invalid number of dst SG.\n");
		return ERR_PTR(-EINVAL);
	}
	if (IS_CCM(rctx->flags))
		rctx->dst_nents += 2;
	else
		rctx->dst_nents += 1;

	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		GFP_KERNEL : GFP_ATOMIC;
	ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
	if (ret)
		return ERR_PTR(ret);

	if (IS_CCM(rctx->flags) && assoclen) {
		/* Get the dst buffer */
		msg_sg = scatterwalk_ffwd(__sg, req->dst, assoclen);

		sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->adata_sg,
				     rctx->assoclen);
		if (IS_ERR(sg))
			goto dst_tbl_free;
		/* dst buffer */
		sg = qce_sgtable_add(&rctx->dst_tbl, msg_sg, rctx->cryptlen);
		if (IS_ERR(sg))
			goto dst_tbl_free;

		totallen = rctx->cryptlen + rctx->assoclen;
	} else {
		if (totallen) {
			sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, totallen);
			if (IS_ERR(sg))
				goto dst_tbl_free;
		}
	}
	if (IS_CCM(rctx->flags))
		sg = qce_aead_prepare_ccm_result_buf(&rctx->dst_tbl, req);
	else
		sg = qce_aead_prepare_result_buf(&rctx->dst_tbl, req);

	if (IS_ERR(sg))
		goto dst_tbl_free;

	sg_mark_end(sg);
	rctx->dst_sg = rctx->dst_tbl.sgl;
	rctx->dst_nents = sg_nents_for_len(rctx->dst_sg, totallen) + 1;

	return sg;

dst_tbl_free:
	sg_free_table(&rctx->dst_tbl);
	return sg;
}

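/*
 * CCM with AAD: the engine expects the associated data prefixed with the
 * length header defined by RFC 3610 / NIST SP 800-38C and zero-padded to a
 * 16-byte boundary, so the AAD is staged in a separate kernel buffer rather
 * than fed straight from the caller's scatterlist.
 */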
static int
qce_aead_ccm_prepare_buf_assoclen(struct aead_request *req)
{
	struct scatterlist *sg, *msg_sg, __sg[2];
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct qce_aead_reqctx *rctx = aead_request_ctx(req);
	struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int assoclen = rctx->assoclen;
	unsigned int adata_header_len, cryptlen, totallen;
	gfp_t gfp;
	bool diff_dst;
	int ret;

	if (IS_DECRYPT(rctx->flags))
		cryptlen = rctx->cryptlen + ctx->authsize;
	else
		cryptlen = rctx->cryptlen;
	totallen = cryptlen + req->assoclen;

	/* Get the msg */
	msg_sg = scatterwalk_ffwd(__sg, req->src, req->assoclen);

	rctx->adata = kzalloc((ALIGN(assoclen, 16) + MAX_CCM_ADATA_HEADER_LEN) *
			      sizeof(unsigned char), GFP_ATOMIC);
	if (!rctx->adata)
		return -ENOMEM;

	/*
	 * Format associated data (RFC 3610 and NIST SP 800-38C).
	 * Even though the specification allows the AAD to be up to 2^64 - 1
	 * bytes, the assoclen field in aead_request is an unsigned int and
	 * thus limits the AAD to at most 2^32 - 1 bytes. So only two of the
	 * header encodings need to be handled here.
	 */
	if (assoclen < 0xff00) {
		adata_header_len = 2;
		*(__be16 *)rctx->adata = cpu_to_be16(assoclen);
	} else {
		adata_header_len = 6;
		*(__be16 *)rctx->adata = cpu_to_be16(0xfffe);
		*(__be32 *)(rctx->adata + 2) = cpu_to_be32(assoclen);
	}

	/* Copy the associated data */
	if (sg_copy_to_buffer(req->src, sg_nents_for_len(req->src, assoclen),
			      rctx->adata + adata_header_len,
			      assoclen) != assoclen)
		return -EINVAL;

	/* Pad associated data to block size */
	rctx->assoclen = ALIGN(assoclen + adata_header_len, 16);
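	/*
	 * Example (illustrative): assoclen = 24 encodes as the 2-byte
	 * big-endian header 0x00 0x18; 26 bytes are then in use, and
	 * rctx->assoclen rounds up to 32 with zero padding.
	 */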

	diff_dst = (req->src != req->dst);

	if (diff_dst)
		rctx->src_nents = sg_nents_for_len(req->src, totallen) + 1;
	else
		rctx->src_nents = sg_nents_for_len(req->src, totallen) + 2;

	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC;
	ret = sg_alloc_table(&rctx->src_tbl, rctx->src_nents, gfp);
	if (ret)
		return ret;

	/* Associated Data */
	sg_init_one(&rctx->adata_sg, rctx->adata, rctx->assoclen);
	sg = qce_sgtable_add(&rctx->src_tbl, &rctx->adata_sg,
			     rctx->assoclen);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto err_free;
	}
	/* src msg */
	sg = qce_sgtable_add(&rctx->src_tbl, msg_sg, cryptlen);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto err_free;
	}
	if (!diff_dst) {
		/*
		 * For decrypt, when src and dst buffers are the same, there
		 * is already space in the buffer for the padded zeroes that
		 * are output in lieu of the MAC that is input. So skip
		 * adding the result buffer.
		 */
		if (!IS_DECRYPT(rctx->flags)) {
			sg = qce_aead_prepare_ccm_result_buf(&rctx->src_tbl, req);
			if (IS_ERR(sg)) {
				ret = PTR_ERR(sg);
				goto err_free;
			}
		}
	}
	sg_mark_end(sg);
	rctx->src_sg = rctx->src_tbl.sgl;
	totallen = cryptlen + rctx->assoclen;
	rctx->src_nents = sg_nents_for_len(rctx->src_sg, totallen);

	if (diff_dst) {
		sg = qce_aead_prepare_dst_buf(req);
		if (IS_ERR(sg)) {
			ret = PTR_ERR(sg);
			goto err_free;
		}
	} else {
		if (IS_ENCRYPT(rctx->flags))
			rctx->dst_nents = rctx->src_nents + 1;
		else
			rctx->dst_nents = rctx->src_nents;
		rctx->dst_sg = rctx->src_sg;
	}

	return 0;
err_free:
	sg_free_table(&rctx->src_tbl);
	return ret;
}

static int qce_aead_prepare_buf(struct aead_request *req)
{
	struct qce_aead_reqctx *rctx = aead_request_ctx(req);
	struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	struct scatterlist *sg;
	bool diff_dst = (req->src != req->dst);
	unsigned int totallen;

	totallen = rctx->cryptlen + rctx->assoclen;

	sg = qce_aead_prepare_dst_buf(req);
	if (IS_ERR(sg))
		return PTR_ERR(sg);
	if (diff_dst) {
		rctx->src_nents = sg_nents_for_len(req->src, totallen);
		if (rctx->src_nents < 0) {
			dev_err(qce->dev, "Invalid number of src SG.\n");
			return -EINVAL;
		}
		rctx->src_sg = req->src;
	} else {
		rctx->src_nents = rctx->dst_nents - 1;
		rctx->src_sg = rctx->dst_sg;
	}
	return 0;
}

static int qce_aead_ccm_prepare_buf(struct aead_request *req)
{
	struct qce_aead_reqctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct scatterlist *sg;
	bool diff_dst = (req->src != req->dst);
	unsigned int cryptlen;

	if (rctx->assoclen)
		return qce_aead_ccm_prepare_buf_assoclen(req);

	if (IS_ENCRYPT(rctx->flags))
		return qce_aead_prepare_buf(req);

	cryptlen = rctx->cryptlen + ctx->authsize;
	if (diff_dst) {
		rctx->src_nents = sg_nents_for_len(req->src, cryptlen);
		rctx->src_sg = req->src;
		sg = qce_aead_prepare_dst_buf(req);
		if (IS_ERR(sg))
			return PTR_ERR(sg);
	} else {
		rctx->src_nents = sg_nents_for_len(req->src, cryptlen);
		rctx->src_sg = req->src;
		rctx->dst_nents = rctx->src_nents;
		rctx->dst_sg = rctx->src_sg;
	}

	return 0;
}

static int qce_aead_create_ccm_nonce(struct qce_aead_reqctx *rctx, struct qce_aead_ctx *ctx)
{
	unsigned int msglen_size, ivsize;
	u8 msg_len[4];
	int i;

	if (!rctx || !rctx->iv)
		return -EINVAL;

	msglen_size = rctx->iv[0] + 1;

	/* Verify that msg len size is valid */
	if (msglen_size < 2 || msglen_size > 8)
		return -EINVAL;

	ivsize = rctx->ivsize;

	/*
	 * Clear the msglen bytes in the IV. Otherwise the h/w engine and
	 * nonce will use any stray value pending there.
	 */
	if (!IS_CCM_RFC4309(rctx->flags)) {
		for (i = 0; i < msglen_size; i++)
			rctx->iv[ivsize - i - 1] = 0;
	}

	/*
	 * The crypto framework encodes cryptlen as unsigned int. Thus, even
	 * though the spec allows for up to 8 bytes to encode msg_len, only
	 * 4 bytes are needed.
	 */
	if (msglen_size > 4)
		msglen_size = 4;

	memcpy(&msg_len[0], &rctx->cryptlen, 4);

	memcpy(&rctx->ccm_nonce[0], rctx->iv, rctx->ivsize);
	if (rctx->assoclen)
		rctx->ccm_nonce[0] |= 1 << CCM_NONCE_ADATA_SHIFT;
	rctx->ccm_nonce[0] |= ((ctx->authsize - 2) / 2) <<
				CCM_NONCE_AUTHSIZE_SHIFT;
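	/*
	 * Illustrative example: iv[0] = 2 (a 3-byte length field),
	 * authsize = 10 and AAD present gives a flags octet of
	 * 0x02 | 0x40 | (4 << 3) = 0x62, matching the B_0 flags layout
	 * of RFC 3610.
	 */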
	for (i = 0; i < msglen_size; i++)
		rctx->ccm_nonce[QCE_MAX_NONCE - i - 1] = msg_len[i];

	return 0;
}

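/*
 * Main per-request handler invoked from the driver's async queue. For
 * rfc4309, a full 16-byte IV is assembled here: flags octet 3 (i.e. a
 * 4-byte length field), the 3-byte salt carried in the key, then the
 * 8-byte per-request IV; the remaining bytes stay zero.
 */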
static int
qce_aead_async_req_handle(struct crypto_async_request *async_req)
{
	struct aead_request *req = aead_request_cast(async_req);
	struct qce_aead_reqctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
	struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	bool diff_dst;
	int dst_nents, src_nents, ret;

	if (IS_CCM_RFC4309(rctx->flags)) {
		memset(rctx->ccm_rfc4309_iv, 0, QCE_MAX_IV_SIZE);
		rctx->ccm_rfc4309_iv[0] = 3;
		memcpy(&rctx->ccm_rfc4309_iv[1], ctx->ccm4309_salt, QCE_CCM4309_SALT_SIZE);
		memcpy(&rctx->ccm_rfc4309_iv[4], req->iv, 8);
		rctx->iv = rctx->ccm_rfc4309_iv;
		rctx->ivsize = AES_BLOCK_SIZE;
	} else {
		rctx->iv = req->iv;
		rctx->ivsize = crypto_aead_ivsize(tfm);
	}
	if (IS_CCM_RFC4309(rctx->flags))
		rctx->assoclen = req->assoclen - 8;
	else
		rctx->assoclen = req->assoclen;

	diff_dst = (req->src != req->dst);
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	if (IS_CCM(rctx->flags)) {
		ret = qce_aead_create_ccm_nonce(rctx, ctx);
		if (ret)
			return ret;
	}
	if (IS_CCM(rctx->flags))
		ret = qce_aead_ccm_prepare_buf(req);
	else
		ret = qce_aead_prepare_buf(req);

	if (ret)
		return ret;
	/* dma_map_sg() returns 0 on failure, never a negative value */
	dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
	if (!dst_nents) {
		ret = -EIO;
		goto error_free;
	}

	if (diff_dst) {
		src_nents = dma_map_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
		if (!src_nents) {
			ret = -EIO;
			goto error_unmap_dst;
		}
	} else {
		if (IS_CCM(rctx->flags) && IS_DECRYPT(rctx->flags))
			src_nents = dst_nents;
		else
			src_nents = dst_nents - 1;
	}

	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, src_nents, rctx->dst_sg, dst_nents,
			       qce_aead_done, async_req);
	if (ret)
		goto error_unmap_src;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_src:
	if (diff_dst)
		dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
error_unmap_dst:
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
error_free:
	if (IS_CCM(rctx->flags) && rctx->assoclen) {
		sg_free_table(&rctx->src_tbl);
		if (diff_dst)
			sg_free_table(&rctx->dst_tbl);
	} else {
		sg_free_table(&rctx->dst_tbl);
	}
	return ret;
}

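/*
 * Common entry point for encrypt and decrypt. Cases the engine cannot
 * handle (zero-length payloads other than a CCM decrypt of just the tag,
 * AES-192 keys, 3DES keys with repeated halves) are bounced to the
 * software fallback transform allocated in qce_aead_init().
 */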
static int qce_aead_crypt(struct aead_request *req, int encrypt)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct qce_aead_reqctx *rctx = aead_request_ctx(req);
	struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct qce_alg_template *tmpl = to_aead_tmpl(tfm);
	unsigned int blocksize = crypto_aead_blocksize(tfm);

	rctx->flags  = tmpl->alg_flags;
	rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;

	if (encrypt)
		rctx->cryptlen = req->cryptlen;
	else
		rctx->cryptlen = req->cryptlen - ctx->authsize;

	/* The crypto engine does not handle zero-length messages */
	if (!rctx->cryptlen) {
		if (!(IS_CCM(rctx->flags) && IS_DECRYPT(rctx->flags)))
			ctx->need_fallback = true;
	}

	/* If fallback is needed, schedule and exit */
	if (ctx->need_fallback) {
		/* Reset need_fallback in case the same ctx is used for another transaction */
		ctx->need_fallback = false;

		aead_request_set_tfm(&rctx->fallback_req, ctx->fallback);
		aead_request_set_callback(&rctx->fallback_req, req->base.flags,
					  req->base.complete, req->base.data);
		aead_request_set_crypt(&rctx->fallback_req, req->src,
				       req->dst, req->cryptlen, req->iv);
		aead_request_set_ad(&rctx->fallback_req, req->assoclen);

		return encrypt ? crypto_aead_encrypt(&rctx->fallback_req) :
				 crypto_aead_decrypt(&rctx->fallback_req);
	}

	/*
	 * CBC algorithms require message lengths to be
	 * multiples of the block size.
	 */
	if (IS_CBC(rctx->flags) && !IS_ALIGNED(rctx->cryptlen, blocksize))
		return -EINVAL;

	/* RFC 4309 supports AAD sizes of 16 or 20 bytes only */
	if (IS_CCM_RFC4309(rctx->flags))
		if (crypto_ipsec_check_assoclen(req->assoclen))
			return -EINVAL;

	return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
}

static int qce_aead_encrypt(struct aead_request *req)
{
	return qce_aead_crypt(req, 1);
}

static int qce_aead_decrypt(struct aead_request *req)
{
	return qce_aead_crypt(req, 0);
}

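/*
 * CCM setkey. For rfc4309 the last QCE_CCM4309_SALT_SIZE bytes of the key
 * blob are the implicit nonce salt (RFC 4309, section 4) and are stashed
 * in the context rather than programmed as key material.
 */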
static int qce_aead_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
			       unsigned int keylen)
{
	struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned long flags = to_aead_tmpl(tfm)->alg_flags;

	if (IS_CCM_RFC4309(flags)) {
		if (keylen < QCE_CCM4309_SALT_SIZE)
			return -EINVAL;
		keylen -= QCE_CCM4309_SALT_SIZE;
		memcpy(ctx->ccm4309_salt, key + keylen, QCE_CCM4309_SALT_SIZE);
	}

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256 && keylen != AES_KEYSIZE_192)
		return -EINVAL;

	ctx->enc_keylen = keylen;
	ctx->auth_keylen = keylen;

	memcpy(ctx->enc_key, key, keylen);
	memcpy(ctx->auth_key, key, keylen);

	if (keylen == AES_KEYSIZE_192)
		ctx->need_fallback = true;

	return IS_CCM_RFC4309(flags) ?
		crypto_aead_setkey(ctx->fallback, key, keylen + QCE_CCM4309_SALT_SIZE) :
		crypto_aead_setkey(ctx->fallback, key, keylen);
}

static int qce_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
{
	struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_authenc_keys authenc_keys;
	unsigned long flags = to_aead_tmpl(tfm)->alg_flags;
	u32 _key[6];
	int err;

	err = crypto_authenc_extractkeys(&authenc_keys, key, keylen);
	if (err)
		return err;

	if (authenc_keys.enckeylen > QCE_MAX_KEY_SIZE ||
	    authenc_keys.authkeylen > QCE_MAX_KEY_SIZE)
		return -EINVAL;

	if (IS_DES(flags)) {
		err = verify_aead_des_key(tfm, authenc_keys.enckey, authenc_keys.enckeylen);
		if (err)
			return err;
	} else if (IS_3DES(flags)) {
		err = verify_aead_des3_key(tfm, authenc_keys.enckey, authenc_keys.enckeylen);
		if (err)
			return err;
		/*
		 * The crypto engine does not support any two keys
		 * being the same for triple des algorithms. The
		 * verify_aead_des3_key() check above does not cover all
		 * the conditions below. Schedule fallback in this case.
		 */
		memcpy(_key, authenc_keys.enckey, DES3_EDE_KEY_SIZE);
		if (!((_key[0] ^ _key[2]) | (_key[1] ^ _key[3])) ||
		    !((_key[2] ^ _key[4]) | (_key[3] ^ _key[5])) ||
		    !((_key[0] ^ _key[4]) | (_key[1] ^ _key[5])))
			ctx->need_fallback = true;
	} else if (IS_AES(flags)) {
		/* No random key sizes */
		if (authenc_keys.enckeylen != AES_KEYSIZE_128 &&
		    authenc_keys.enckeylen != AES_KEYSIZE_192 &&
		    authenc_keys.enckeylen != AES_KEYSIZE_256)
			return -EINVAL;
		if (authenc_keys.enckeylen == AES_KEYSIZE_192)
			ctx->need_fallback = true;
	}

	ctx->enc_keylen = authenc_keys.enckeylen;
	ctx->auth_keylen = authenc_keys.authkeylen;

	memcpy(ctx->enc_key, authenc_keys.enckey, authenc_keys.enckeylen);

	memset(ctx->auth_key, 0, sizeof(ctx->auth_key));
	memcpy(ctx->auth_key, authenc_keys.authkey, authenc_keys.authkeylen);

	return crypto_aead_setkey(ctx->fallback, key, keylen);
}

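/*
 * Tag-length policy: CCM (RFC 3610) allows even tag sizes from 4 to 16
 * bytes; rfc4309 narrows that to 8, 12 or 16. Other modes defer entirely
 * to the fallback's own validation.
 */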
static int qce_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned long flags = to_aead_tmpl(tfm)->alg_flags;

	if (IS_CCM(flags)) {
		if (authsize < 4 || authsize > 16 || authsize % 2)
			return -EINVAL;
		if (IS_CCM_RFC4309(flags) && (authsize < 8 || authsize % 4))
			return -EINVAL;
	}
	ctx->authsize = authsize;

	return crypto_aead_setauthsize(ctx->fallback, authsize);
}

static int qce_aead_init(struct crypto_aead *tfm)
{
	struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);

	ctx->need_fallback = false;
	ctx->fallback = crypto_alloc_aead(crypto_tfm_alg_name(&tfm->base),
					  0, CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	crypto_aead_set_reqsize(tfm, sizeof(struct qce_aead_reqctx) +
				crypto_aead_reqsize(ctx->fallback));
	return 0;
}

static void qce_aead_exit(struct crypto_aead *tfm)
{
	struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->fallback);
}

struct qce_aead_def {
	unsigned long flags;
	const char *name;
	const char *drv_name;
	unsigned int blocksize;
	unsigned int chunksize;
	unsigned int ivsize;
	unsigned int maxauthsize;
};

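/*
 * Algorithm table. Note the CCM entries use a cra_blocksize of 1: CCM is
 * built on CTR mode, so the cipher itself imposes no block-alignment
 * requirement on the input length.
 */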
static const struct qce_aead_def aead_def[] = {
	{
		.flags		= QCE_ALG_DES | QCE_MODE_CBC | QCE_HASH_SHA1_HMAC,
		.name		= "authenc(hmac(sha1),cbc(des))",
		.drv_name	= "authenc-hmac-sha1-cbc-des-qce",
		.blocksize	= DES_BLOCK_SIZE,
		.ivsize		= DES_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
	},
	{
		.flags		= QCE_ALG_3DES | QCE_MODE_CBC | QCE_HASH_SHA1_HMAC,
		.name		= "authenc(hmac(sha1),cbc(des3_ede))",
		.drv_name	= "authenc-hmac-sha1-cbc-3des-qce",
		.blocksize	= DES3_EDE_BLOCK_SIZE,
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
	},
	{
		.flags		= QCE_ALG_DES | QCE_MODE_CBC | QCE_HASH_SHA256_HMAC,
		.name		= "authenc(hmac(sha256),cbc(des))",
		.drv_name	= "authenc-hmac-sha256-cbc-des-qce",
		.blocksize	= DES_BLOCK_SIZE,
		.ivsize		= DES_BLOCK_SIZE,
		.maxauthsize	= SHA256_DIGEST_SIZE,
	},
	{
		.flags		= QCE_ALG_3DES | QCE_MODE_CBC | QCE_HASH_SHA256_HMAC,
		.name		= "authenc(hmac(sha256),cbc(des3_ede))",
		.drv_name	= "authenc-hmac-sha256-cbc-3des-qce",
		.blocksize	= DES3_EDE_BLOCK_SIZE,
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.maxauthsize	= SHA256_DIGEST_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_CBC | QCE_HASH_SHA256_HMAC,
		.name		= "authenc(hmac(sha256),cbc(aes))",
		.drv_name	= "authenc-hmac-sha256-cbc-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.maxauthsize	= SHA256_DIGEST_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_CCM,
		.name		= "ccm(aes)",
		.drv_name	= "ccm-aes-qce",
		.blocksize	= 1,
		.ivsize		= AES_BLOCK_SIZE,
		.maxauthsize	= AES_BLOCK_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_CCM | QCE_MODE_CCM_RFC4309,
		.name		= "rfc4309(ccm(aes))",
		.drv_name	= "rfc4309-ccm-aes-qce",
		.blocksize	= 1,
		.ivsize		= 8,
		.maxauthsize	= AES_BLOCK_SIZE,
	},
};

static int qce_aead_register_one(const struct qce_aead_def *def, struct qce_device *qce)
{
	struct qce_alg_template *tmpl;
	struct aead_alg *alg;
	int ret;

	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
	if (!tmpl)
		return -ENOMEM;

	alg = &tmpl->alg.aead;

	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);

	alg->base.cra_blocksize = def->blocksize;
	alg->chunksize = def->chunksize;
	alg->ivsize = def->ivsize;
	alg->maxauthsize = def->maxauthsize;
	if (IS_CCM(def->flags))
		alg->setkey = qce_aead_ccm_setkey;
	else
		alg->setkey = qce_aead_setkey;
	alg->setauthsize = qce_aead_setauthsize;
	alg->encrypt = qce_aead_encrypt;
	alg->decrypt = qce_aead_decrypt;
	alg->init = qce_aead_init;
	alg->exit = qce_aead_exit;

	alg->base.cra_priority = 300;
	alg->base.cra_flags = CRYPTO_ALG_ASYNC |
			      CRYPTO_ALG_ALLOCATES_MEMORY |
			      CRYPTO_ALG_KERN_DRIVER_ONLY |
			      CRYPTO_ALG_NEED_FALLBACK;
	alg->base.cra_ctxsize = sizeof(struct qce_aead_ctx);
	alg->base.cra_alignmask = 0;
	alg->base.cra_module = THIS_MODULE;

	INIT_LIST_HEAD(&tmpl->entry);
	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_AEAD;
	tmpl->alg_flags = def->flags;
	tmpl->qce = qce;

	ret = crypto_register_aead(alg);
	if (ret) {
		kfree(tmpl);
		dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name);
		return ret;
	}

	list_add_tail(&tmpl->entry, &aead_algs);
	dev_dbg(qce->dev, "%s is registered\n", alg->base.cra_name);
	return 0;
}

static void qce_aead_unregister(struct qce_device *qce)
{
	struct qce_alg_template *tmpl, *n;

	list_for_each_entry_safe(tmpl, n, &aead_algs, entry) {
		crypto_unregister_aead(&tmpl->alg.aead);
		list_del(&tmpl->entry);
		kfree(tmpl);
	}
}

static int qce_aead_register(struct qce_device *qce)
{
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(aead_def); i++) {
		ret = qce_aead_register_one(&aead_def[i], qce);
		if (ret)
			goto err;
	}

	return 0;
err:
	qce_aead_unregister(qce);
	return ret;
}

const struct qce_algo_ops aead_ops = {
	.type = CRYPTO_ALG_TYPE_AEAD,
	.register_algs = qce_aead_register,
	.unregister_algs = qce_aead_unregister,
	.async_req_handle = qce_aead_async_req_handle,
};