/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *	Manoj Malviya (manojmalviya@chelsio.com)
 *	Atul Gupta (atul.gupta@chelsio.com)
 *	Jitendra Lulla (jlulla@chelsio.com)
 *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *	Harsh Jain (harsh@chelsio.com)
 */
#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/gcm.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "t4fw_api.h"
#include "t4_msg.h"
#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"
#define IV AES_BLOCK_SIZE

static unsigned int sgl_ent_len[] = {
	0, 0, 16, 24, 40, 48, 64, 72, 88,
	96, 112, 120, 136, 144, 160, 168, 184,
	192, 208, 216, 232, 240, 256, 264, 280,
	288, 304, 312, 328, 336, 352, 360, 376
};

static unsigned int dsgl_ent_len[] = {
	0, 32, 32, 48, 48, 64, 64, 80, 80,
	112, 112, 128, 128, 144, 144, 160, 160,
	192, 192, 208, 208, 224, 224, 240, 240,
	272, 272, 288, 288, 304, 304, 320, 320
};

static u32 round_constant[11] = {
	0x01000000, 0x02000000, 0x04000000, 0x08000000,
	0x10000000, 0x20000000, 0x40000000, 0x80000000,
	0x1B000000, 0x36000000, 0x6C000000
};
static int chcr_handle_cipher_resp(struct skcipher_request *req,
				   unsigned char *input, int err);

static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->aeadctx;
}

static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->ablkctx;
}

static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->hmacctx;
}

static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->gcm;
}

static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->authenc;
}

static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
	return container_of(ctx->dev, struct uld_ctx, dev);
}

static inline int is_ofld_imm(const struct sk_buff *skb)
{
	return (skb->len <= SGE_MAX_WR_LEN);
}

static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
{
	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
}
static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
			 unsigned int entlen,
			 unsigned int skip)
{
	int nents = 0;
	unsigned int less;
	unsigned int skip_len = 0;

	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
		} else {
			skip_len = skip;
			skip = 0;
		}
		sg = sg_next(sg);
	}

	while (sg && reqlen) {
		less = min(reqlen, sg_dma_len(sg) - skip_len);
		nents += DIV_ROUND_UP(less, entlen);
		reqlen -= less;
		skip_len = 0;
		sg = sg_next(sg);
	}
	return nents;
}
static inline int get_aead_subtype(struct crypto_aead *aead)
{
	struct aead_alg *alg = crypto_aead_alg(aead);
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.aead);

	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
	u8 temp[SHA512_DIGEST_SIZE];
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	struct cpl_fw6_pld *fw6_pld;
	int cmp = 0;

	fw6_pld = (struct cpl_fw6_pld *)input;
	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
				   authsize, req->assoclen +
				   req->cryptlen - authsize);
		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
	}
	if (cmp)
		*err = -EBADMSG;
	else
		*err = 0;
}
static int chcr_inc_wrcount(struct chcr_dev *dev)
{
	if (dev->state == CHCR_DETACH)
		return 1;
	atomic_inc(&dev->inflight);
	return 0;
}

static inline void chcr_dec_wrcount(struct chcr_dev *dev)
{
	atomic_dec(&dev->inflight);
}
static inline int chcr_handle_aead_resp(struct aead_request *req,
					unsigned char *input,
					int err)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_dev *dev = a_ctx(tfm)->dev;

	chcr_aead_common_exit(req);
	if (reqctx->verify == VERIFY_SW) {
		chcr_verify_tag(req, input, &err);
		reqctx->verify = VERIFY_HW;
	}
	chcr_dec_wrcount(dev);
	req->base.complete(&req->base, err);

	return err;
}
static void get_aes_decrypt_key(unsigned char *dec_key,
				const unsigned char *key,
				unsigned int keylength)
{
	u32 temp;
	u32 w_ring[MAX_NK];
	int i, j, k;
	u8 nr, nk;

	switch (keylength) {
	case AES_KEYLENGTH_128BIT:
		nk = KEYLENGTH_4BYTES;
		nr = NUMBER_OF_ROUNDS_10;
		break;
	case AES_KEYLENGTH_192BIT:
		nk = KEYLENGTH_6BYTES;
		nr = NUMBER_OF_ROUNDS_12;
		break;
	case AES_KEYLENGTH_256BIT:
		nk = KEYLENGTH_8BYTES;
		nr = NUMBER_OF_ROUNDS_14;
		break;
	default:
		return;
	}
	for (i = 0; i < nk; i++)
		w_ring[i] = get_unaligned_be32(&key[i * 4]);

	i = 0;
	temp = w_ring[nk - 1];
	while (i + nk < (nr + 1) * 4) {
		if (!(i % nk)) {
			/* RotWord(temp) */
			temp = (temp << 8) | (temp >> 24);
			temp = aes_ks_subword(temp);
			temp ^= round_constant[i / nk];
		} else if (nk == 8 && (i % 4 == 0)) {
			temp = aes_ks_subword(temp);
		}
		w_ring[i % nk] ^= temp;
		temp = w_ring[i % nk];
		i++;
	}
	i--;
	for (k = 0, j = i % nk; k < nk; k++) {
		put_unaligned_be32(w_ring[j], &dec_key[k * 4]);
		j--;
		if (j < 0)
			j += nk;
	}
}
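
/*
 * Illustrative sketch (not part of the driver, compiled out): for AES-128
 * the helper above emits the last four words of the FIPS-197 key schedule,
 * most recently generated word first, which is what the SEC engine needs to
 * run the schedule backwards for decryption. Assuming the software key
 * expansion from <crypto/aes.h> (which stores schedule words little-endian),
 * the result could be cross-checked like this; "example_check_rrkey" is a
 * hypothetical name.
 */
#if 0
static int example_check_rrkey(const u8 *key)
{
	struct crypto_aes_ctx aes;
	u8 rrkey[AES_BLOCK_SIZE];
	int k;

	get_aes_decrypt_key(rrkey, key, AES_KEYLENGTH_128BIT);
	if (aes_expandkey(&aes, key, AES_KEYSIZE_128))
		return -EINVAL;
	/* key_enc[] holds w[0..43] little-endian; rrkey is w43,w42,w41,w40 */
	for (k = 0; k < 4; k++)
		if (get_unaligned_be32(&rrkey[k * 4]) !=
		    swab32(aes.key_enc[43 - k]))
			return -EINVAL;
	return 0;
}
#endif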
static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);

	switch (ds) {
	case SHA1_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha1", 0, 0);
		break;
	case SHA224_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha224", 0, 0);
		break;
	case SHA256_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha256", 0, 0);
		break;
	case SHA384_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha384", 0, 0);
		break;
	case SHA512_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha512", 0, 0);
		break;
	}

	return base_hash;
}
static int chcr_compute_partial_hash(struct shash_desc *desc,
				     char *iopad, char *result_hash,
				     int digest_size)
{
	struct sha1_state sha1_st;
	struct sha256_state sha256_st;
	struct sha512_state sha512_st;
	int error;

	if (digest_size == SHA1_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha1_st);
		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
	} else if (digest_size == SHA224_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);

	} else if (digest_size == SHA256_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);

	} else if (digest_size == SHA384_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);

	} else if (digest_size == SHA512_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else {
		error = -EINVAL;
		pr_err("Unknown digest size %d\n", digest_size);
	}
	return error;
}
static void chcr_change_order(char *buf, int ds)
{
	int i;

	if (ds == SHA512_DIGEST_SIZE) {
		for (i = 0; i < (ds / sizeof(u64)); i++)
			*((__be64 *)buf + i) =
				cpu_to_be64(*((u64 *)buf + i));
	} else {
		for (i = 0; i < (ds / sizeof(u32)); i++)
			*((__be32 *)buf + i) =
				cpu_to_be32(*((u32 *)buf + i));
	}
}
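
/*
 * Illustrative sketch (not part of the driver, compiled out): the exported
 * sha*_state keeps its chaining words as native-endian integers, while the
 * key context copied to the SEC engine expects them big-endian, so the
 * helper above flips each word in place. A minimal use, assuming the
 * standard SHA-256 initial values from <crypto/sha2.h>:
 */
#if 0
static void example_change_order(void)
{
	u32 h[8] = {
		SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
		SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
	};

	/* After this call, h[] holds the same words as __be32 values. */
	chcr_change_order((char *)h, SHA256_DIGEST_SIZE);
}
#endif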
static inline int is_hmac(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
			     alg.hash);

	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
		return 1;
	return 0;
}
static inline void dsgl_walk_init(struct dsgl_walk *walk,
				  struct cpl_rx_phys_dsgl *dsgl)
{
	walk->dsgl = dsgl;
	walk->nents = 0;
	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
}

static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
				 int pci_chan_id)
{
	struct cpl_rx_phys_dsgl *phys_cpl;

	phys_cpl = walk->dsgl;

	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
	phys_cpl->pcirlxorder_to_noofsgentr =
		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
	phys_cpl->rss_hdr_int.qid = htons(qid);
	phys_cpl->rss_hdr_int.hash_val = 0;
	phys_cpl->rss_hdr_int.channel = pci_chan_id;
}
static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
				      size_t size,
				      dma_addr_t addr)
{
	int j;

	if (!size)
		return;
	j = walk->nents;
	walk->to->len[j % 8] = htons(size);
	walk->to->addr[j % 8] = cpu_to_be64(addr);
	j++;
	if ((j % 8) == 0)
		walk->to++;
	walk->nents = j;
}

static void dsgl_walk_add_sg(struct dsgl_walk *walk,
			     struct scatterlist *sg,
			     unsigned int slen,
			     unsigned int skip)
{
	int skip_len = 0;
	unsigned int left_size = slen, len = 0;
	unsigned int j = walk->nents;
	int offset, ent_len;

	if (!slen)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}

	while (left_size && sg) {
		len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		offset = 0;
		while (len) {
			ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
			walk->to->len[j % 8] = htons(ent_len);
			walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
							    offset + skip_len);
			offset += ent_len;
			len -= ent_len;
			j++;
			if ((j % 8) == 0)
				walk->to++;
		}
		walk->last_sg = sg;
		walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
					  skip_len) + skip_len;
		left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		skip_len = 0;
		sg = sg_next(sg);
	}
	walk->nents = j;
}
static inline void ulptx_walk_init(struct ulptx_walk *walk,
				   struct ulptx_sgl *ulp)
{
	walk->sgl = ulp;
	walk->nents = 0;
	walk->pair_idx = 0;
	walk->pair = ulp->sge;
	walk->last_sg = NULL;
	walk->last_sg_len = 0;
}

static inline void ulptx_walk_end(struct ulptx_walk *walk)
{
	walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
				    ULPTX_NSGE_V(walk->nents));
}

static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
				       size_t size,
				       dma_addr_t addr)
{
	if (!size)
		return;

	if (walk->nents == 0) {
		walk->sgl->len0 = cpu_to_be32(size);
		walk->sgl->addr0 = cpu_to_be64(addr);
	} else {
		walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
		walk->pair_idx = !walk->pair_idx;
		if (!walk->pair_idx)
			walk->pair++;
	}
	walk->nents++;
}
static void ulptx_walk_add_sg(struct ulptx_walk *walk,
			      struct scatterlist *sg,
			      unsigned int len,
			      unsigned int skip)
{
	int small;
	int skip_len = 0;
	unsigned int sgmin;

	if (!len)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}
	WARN(!sg, "SG should not be null here\n");
	if (sg && (walk->nents == 0)) {
		small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->sgl->len0 = cpu_to_be32(sgmin);
		walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->nents++;
		len -= sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = sgmin + skip_len;
		skip_len += sgmin;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}

	while (sg && len) {
		small = min(sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
		walk->pair->addr[walk->pair_idx] =
			cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->pair_idx = !walk->pair_idx;
		walk->nents++;
		if (!walk->pair_idx)
			walk->pair++;
		len -= sgmin;
		skip_len += sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = skip_len;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}
}
static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.skcipher);

	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
	struct adapter *adap = netdev2adap(dev);
	struct sge_uld_txq_info *txq_info =
		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
	struct sge_uld_txq *txq;
	int ret = 0;

	local_bh_disable();
	txq = &txq_info->uldtxq[idx];
	spin_lock(&txq->sendq.lock);
	if (txq->full)
		ret = -1;
	spin_unlock(&txq->sendq.lock);
	local_bh_enable();
	return ret;
}
static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
			       struct _key_ctx *key_ctx)
{
	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
	} else {
		memcpy(key_ctx->key,
		       ablkctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->enckey_len >> 1);
		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
	}
	return 0;
}
static int chcr_hash_ent_in_wr(struct scatterlist *src,
			       unsigned int minsg,
			       unsigned int space,
			       unsigned int srcskip)
{
	int srclen = 0;
	int srcsg = minsg;
	int soffset = 0, sless;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	while (src && space > (sgl_ent_len[srcsg + 1])) {
		sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
			      CHCR_SRC_SG_SIZE);
		srclen += sless;
		soffset += sless;
		srcsg++;
		if (sg_dma_len(src) == (soffset + srcskip)) {
			src = sg_next(src);
			soffset = 0;
			srcskip = 0;
		}
	}
	return srclen;
}
static int chcr_sg_ent_in_wr(struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int minsg,
			     unsigned int space,
			     unsigned int srcskip,
			     unsigned int dstskip)
{
	int srclen = 0, dstlen = 0;
	int srcsg = minsg, dstsg = minsg;
	int offset = 0, soffset = 0, less, sless = 0;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	if (sg_dma_len(dst) == dstskip) {
		dst = sg_next(dst);
		dstskip = 0;
	}

	while (src && dst &&
	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
		sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
			      CHCR_SRC_SG_SIZE);
		srclen += sless;
		srcsg++;

		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
			if (srclen <= dstlen)
				break;
			less = min_t(unsigned int, sg_dma_len(dst) - offset -
				     dstskip, CHCR_DST_SG_SIZE);
			dstlen += less;
			offset += less;
			if ((offset + dstskip) == sg_dma_len(dst)) {
				dst = sg_next(dst);
				offset = 0;
			}
			dstsg++;
			dstskip = 0;
		}
		soffset += sless;
		if ((soffset + srcskip) == sg_dma_len(src)) {
			src = sg_next(src);
			srcskip = 0;
			soffset = 0;
		}
	}
	return min(srclen, dstlen);
}
static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
				struct skcipher_request *req,
				u8 *iv,
				unsigned short op_type)
{
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	int err;

	skcipher_request_set_tfm(&reqctx->fallback_req, cipher);
	skcipher_request_set_callback(&reqctx->fallback_req, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(&reqctx->fallback_req, req->src, req->dst,
				   req->cryptlen, iv);

	err = op_type ? crypto_skcipher_decrypt(&reqctx->fallback_req) :
			crypto_skcipher_encrypt(&reqctx->fallback_req);

	return err;
}
static inline int get_qidxs(struct crypto_async_request *req,
			    unsigned int *txqidx, unsigned int *rxqidx)
{
	struct crypto_tfm *tfm = req->tfm;
	int ret = 0;

	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
	{
		struct aead_request *aead_req =
			container_of(req, struct aead_request, base);
		struct chcr_aead_reqctx *reqctx = aead_request_ctx(aead_req);

		*txqidx = reqctx->txqidx;
		*rxqidx = reqctx->rxqidx;
		break;
	}
	case CRYPTO_ALG_TYPE_SKCIPHER:
	{
		struct skcipher_request *sk_req =
			container_of(req, struct skcipher_request, base);
		struct chcr_skcipher_req_ctx *reqctx =
			skcipher_request_ctx(sk_req);

		*txqidx = reqctx->txqidx;
		*rxqidx = reqctx->rxqidx;
		break;
	}
	case CRYPTO_ALG_TYPE_AHASH:
	{
		struct ahash_request *ahash_req =
			container_of(req, struct ahash_request, base);
		struct chcr_ahash_req_ctx *reqctx =
			ahash_request_ctx(ahash_req);

		*txqidx = reqctx->txqidx;
		*rxqidx = reqctx->rxqidx;
		break;
	}
	default:
		ret = -EINVAL;
		/* should never get here */
		BUG();
		break;
	}
	return ret;
}
static inline void create_wreq(struct chcr_context *ctx,
			       struct chcr_wr *chcr_req,
			       struct crypto_async_request *req,
			       unsigned int imm,
			       int hash_sz,
			       unsigned int len16,
			       unsigned int sc_len,
			       unsigned int lcb)
{
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	unsigned int tx_channel_id, rx_channel_id;
	unsigned int txqidx = 0, rxqidx = 0;
	unsigned int qid, fid;

	get_qidxs(req, &txqidx, &rxqidx);
	qid = u_ctx->lldi.rxq_ids[rxqidx];
	fid = u_ctx->lldi.rxq_ids[0];
	tx_channel_id = txqidx / ctx->txq_perchan;
	rx_channel_id = rxqidx / ctx->rxq_perchan;

	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
	chcr_req->wreq.pld_size_hash_size =
		htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
	chcr_req->wreq.len16_pkd =
		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
	chcr_req->wreq.rx_chid_to_rx_q_id = FILL_WR_RX_Q_ID(rx_channel_id, qid,
							    !!lcb, txqidx);

	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(tx_channel_id, fid);
	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
				     ((sizeof(chcr_req->wreq)) >> 4)));
	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
					   sizeof(chcr_req->key_ctx) + sc_len);
}
/**
 *	create_cipher_wr - form the WR for cipher operations
 *	@wrparam: Container for create_cipher_wr()'s parameters
 */
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
	struct chcr_context *ctx = c_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	struct chcr_skcipher_req_ctx *reqctx =
		skcipher_request_ctx(wrparam->req);
	unsigned int temp = 0, transhdr_len, dst_size;
	int error;
	int nents;
	unsigned int kctx_len;
	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
			GFP_KERNEL : GFP_ATOMIC;
	struct adapter *adap = padap(ctx->dev);
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;

	nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
			      reqctx->dst_ofst);
	dst_size = get_space_for_phys_dsgl(nents);
	kctx_len = roundup(ablkctx->enckey_len, 16);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
			      CHCR_SRC_SG_SIZE, reqctx->src_ofst);
	temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
			     (sgl_len(nents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);
	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}
	chcr_req = __skb_put_zero(skb, transhdr_len);
	chcr_req->sec_cpl.op_ivinsrtofst =
			FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);

	chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
	chcr_req->sec_cpl.aadstart_cipherstop_hi =
			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);

	chcr_req->sec_cpl.cipherstop_lo_authinsert =
			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
							 ablkctx->ciph_mode,
							 0, 0, IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
							 0, 1, dst_size);

	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
	if ((reqctx->op == CHCR_DECRYPT_OP) &&
	    (!(get_cryptoalg_subtype(tfm) ==
	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
	    (!(get_cryptoalg_subtype(tfm) ==
	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
	} else {
		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
			memcpy(chcr_req->key_ctx.key, ablkctx->key,
			       ablkctx->enckey_len);
		} else {
			memcpy(chcr_req->key_ctx.key, ablkctx->key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->enckey_len >> 1);
			memcpy(chcr_req->key_ctx.key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->key,
			       ablkctx->enckey_len >> 1);
		}
	}
	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);

	atomic_inc(&adap->chcr_stats.cipher_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
		+ (reqctx->imm ? (wrparam->bytes) : 0);
	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
		    transhdr_len, temp,
		    ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
	reqctx->skb = skb;

	if (reqctx->op && (ablkctx->ciph_mode ==
			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
		sg_pcopy_to_buffer(wrparam->req->src,
			sg_nents(wrparam->req->src), wrparam->req->iv, 16,
			reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);

	return skb;
err:
	return ERR_PTR(error);
}
static inline int chcr_keyctx_ck_size(unsigned int keylen)
{
	int ck_size = 0;

	if (keylen == AES_KEYSIZE_128)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	else if (keylen == AES_KEYSIZE_192)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	else if (keylen == AES_KEYSIZE_256)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	else
		ck_size = 0;

	return ck_size;
}

static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
				       const u8 *key,
				       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));

	crypto_skcipher_clear_flags(ablkctx->sw_cipher,
				    CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ablkctx->sw_cipher,
				  cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
}
static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
	return 0;
badkey_err:
	ablkctx->enckey_len = 0;

	return err;
}
static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;
	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	ablkctx->enckey_len = 0;

	return err;
}
static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
				   const u8 *key,
				   unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	if (keylen < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;
	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
	       CTR_RFC3686_NONCE_SIZE);

	keylen -= CTR_RFC3686_NONCE_SIZE;
	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	ablkctx->enckey_len = 0;

	return err;
}
static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
{
	unsigned int size = AES_BLOCK_SIZE;
	__be32 *b = (__be32 *)(dstiv + size);
	u32 c, prev;

	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
	for (; size >= 4; size -= 4) {
		prev = be32_to_cpu(*--b);
		c = prev + add;
		*b = cpu_to_be32(c);
		if (prev < c)
			break;
		add = 1;
	}
}
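
/*
 * Illustrative sketch (not part of the driver, compiled out): ctr_add_iv()
 * treats the whole 16-byte block as a big-endian counter, propagating the
 * carry word by word. With the last word all-ones, adding 1 must carry
 * into the third word; "example_ctr_add_carry" is a hypothetical name.
 */
#if 0
static void example_ctr_add_carry(void)
{
	u8 src[AES_BLOCK_SIZE] = { 0 };
	u8 dst[AES_BLOCK_SIZE];

	memset(src + 12, 0xff, 4);	/* last word = 0xffffffff */
	ctr_add_iv(dst, src, 1);
	/* dst words 3..4: 00 00 00 01 | 00 00 00 00 */
}
#endif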
static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
{
	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
	u64 c;
	u32 temp = be32_to_cpu(*--b);

	temp = ~temp;
	c = (u64)temp + 1; /* number of blocks that can be processed without overflow */
	if ((bytes / AES_BLOCK_SIZE) >= c)
		bytes = c * AES_BLOCK_SIZE;
	return bytes;
}
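
/*
 * Illustrative sketch (not part of the driver, compiled out): with the last
 * 32-bit IV word at 0xfffffffc, four blocks remain before the counter word
 * wraps, so a 64-byte request passes through unchanged while a 160-byte one
 * is clamped to 4 * AES_BLOCK_SIZE. "example_ctr_clamp" is a hypothetical
 * name.
 */
#if 0
static void example_ctr_clamp(void)
{
	u8 iv[AES_BLOCK_SIZE] = { 0 };

	*(__be32 *)(iv + 12) = cpu_to_be32(0xfffffffc);
	WARN_ON(adjust_ctr_overflow(iv, 64) != 64);	/* exactly fits */
	WARN_ON(adjust_ctr_overflow(iv, 160) != 64);	/* clamped */
}
#endif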
static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
			     u32 isfinal)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct crypto_aes_ctx aes;
	int ret, i;
	u8 *key;
	unsigned int keylen;
	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
	int round8 = round / 8;

	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);

	keylen = ablkctx->enckey_len / 2;
	key = ablkctx->key + keylen;
	/* For a 192-bit key, remove the zero padding that was added in
	 * chcr_xts_setkey
	 */
	if (KEY_CONTEXT_CK_SIZE_G(ntohl(ablkctx->key_ctx_hdr))
			== CHCR_KEYCTX_CIPHER_KEY_SIZE_192)
		ret = aes_expandkey(&aes, key, keylen - 8);
	else
		ret = aes_expandkey(&aes, key, keylen);
	if (ret)
		return ret;
	aes_encrypt(&aes, iv, iv);
	for (i = 0; i < round8; i++)
		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);

	for (i = 0; i < (round % 8); i++)
		gf128mul_x_ble((le128 *)iv, (le128 *)iv);

	if (!isfinal)
		aes_decrypt(&aes, iv, iv);

	memzero_explicit(&aes, sizeof(aes));
	return 0;
}
static int chcr_update_cipher_iv(struct skcipher_request *req,
				 struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(tfm);
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->iv, (reqctx->processed /
					 AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
							    AES_BLOCK_SIZE) + 1);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
		ret = chcr_update_tweak(req, iv, 0);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		if (reqctx->op)
			/* Updated before sending the last WR */
			memcpy(iv, req->iv, AES_BLOCK_SIZE);
		else
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}

	return ret;
}
/* We need a separate function for the final IV because in RFC3686 the
 * initial counter starts from 1, and the IV buffer is only 8 bytes,
 * which remains constant across subsequent update requests.
 */
static int chcr_final_cipher_iv(struct skcipher_request *req,
				struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(tfm);
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
						     AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) {
		if (!reqctx->partial_req)
			memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
		else
			ret = chcr_update_tweak(req, iv, 1);
	} else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		/* Already updated for decrypt */
		if (!reqctx->op)
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}
	return ret;
}
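
/*
 * Illustrative sketch (not part of the driver, compiled out): the 16-byte
 * counter block used for rfc3686(ctr(aes)) is nonce (4 bytes, taken from
 * the key) || IV (8 bytes, from the request) || counter (4 bytes, big
 * endian, starting at 1), which is why only the last word is rewritten
 * between partial WRs. "example_rfc3686_block" is a hypothetical name.
 */
#if 0
static void example_rfc3686_block(const u8 *nonce, const u8 *req_iv,
				  u8 *block)
{
	memcpy(block, nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(block + CTR_RFC3686_NONCE_SIZE, req_iv, CTR_RFC3686_IV_SIZE);
	*(__be32 *)(block + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
		cpu_to_be32(1);
}
#endif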
static int chcr_handle_cipher_resp(struct skcipher_request *req,
				   unsigned char *input, int err)
{
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	struct chcr_context *ctx = c_ctx(tfm);
	struct adapter *adap = padap(ctx->dev);
	struct cipher_wr_param wrparam;
	struct sk_buff *skb;
	int bytes;

	if (err)
		goto unmap;
	if (req->cryptlen == reqctx->processed) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
		goto complete;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  reqctx->src_ofst, reqctx->dst_ofst);
		if ((bytes + reqctx->processed) >= req->cryptlen)
			bytes = req->cryptlen - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		/* CTR mode counter overflow */
		bytes = req->cryptlen - reqctx->processed;
	}
	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
	if (err)
		goto unmap;

	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		memcpy(req->iv, reqctx->init_iv, IV);
		atomic_inc(&adap->chcr_stats.fallback);
		err = chcr_cipher_fallback(ablkctx->sw_cipher, req, req->iv,
					   reqctx->op);
		goto complete;
	}

	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CTR)
		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
	wrparam.qid = u_ctx->lldi.rxq_ids[reqctx->rxqidx];
	wrparam.req = req;
	wrparam.bytes = bytes;
	skb = create_cipher_wr(&wrparam);
	if (IS_ERR(skb)) {
		pr_err("%s : Failed to form WR. No memory\n", __func__);
		err = PTR_ERR(skb);
		goto unmap;
	}
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	reqctx->last_req_len = bytes;
	reqctx->processed += bytes;
	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
	    CRYPTO_TFM_REQ_MAY_SLEEP) {
		complete(&ctx->cbc_aes_aio_done);
	}
	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
	    CRYPTO_TFM_REQ_MAY_SLEEP) {
		complete(&ctx->cbc_aes_aio_done);
	}
	chcr_dec_wrcount(dev);
	req->base.complete(&req->base, err);
	return err;
}
static int process_cipher(struct skcipher_request *req,
			  unsigned short qid,
			  struct sk_buff **skb,
			  unsigned short op_type)
{
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct adapter *adap = padap(c_ctx(tfm)->dev);
	struct cipher_wr_param wrparam;
	int bytes, err = -EINVAL;
	int subtype;

	reqctx->processed = 0;
	reqctx->partial_req = 0;
	if (!req->iv)
		goto error;
	subtype = get_cryptoalg_subtype(tfm);
	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
	    (req->cryptlen == 0) ||
	    (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
		if (req->cryptlen == 0 && subtype != CRYPTO_ALG_SUB_TYPE_XTS)
			goto fallback;
		else if (req->cryptlen % crypto_skcipher_blocksize(tfm) &&
			 subtype == CRYPTO_ALG_SUB_TYPE_XTS)
			goto fallback;
		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
		       ablkctx->enckey_len, req->cryptlen, ivsize);
		goto error;
	}

	err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
	if (err)
		goto error;
	if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
					       AES_MIN_KEY_SIZE +
					       sizeof(struct cpl_rx_phys_dsgl) +
					       /* Min dsgl size */
					       32))) {
		/* Can be sent as Imm */
		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;

		dnents = sg_nents_xlen(req->dst, req->cryptlen,
				       CHCR_DST_SG_SIZE, 0);
		phys_dsgl = get_space_for_phys_dsgl(dnents);
		kctx_len = roundup(ablkctx->enckey_len, 16);
		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
		reqctx->imm = (transhdr_len + IV + req->cryptlen) <=
			SGE_MAX_WR_LEN;
		bytes = IV + req->cryptlen;

	} else {
		reqctx->imm = 0;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  0, 0);
		if ((bytes + reqctx->processed) >= req->cryptlen)
			bytes = req->cryptlen - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		bytes = req->cryptlen;
	}
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) {
		bytes = adjust_ctr_overflow(req->iv, bytes);
	}
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
		       CTR_RFC3686_IV_SIZE);

		/* initialize counter portion of counter block */
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
		memcpy(reqctx->init_iv, reqctx->iv, IV);

	} else {

		memcpy(reqctx->iv, req->iv, IV);
		memcpy(reqctx->init_iv, req->iv, IV);
	}
	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
fallback:	atomic_inc(&adap->chcr_stats.fallback);
		err = chcr_cipher_fallback(ablkctx->sw_cipher, req,
					   subtype ==
					   CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
					   reqctx->iv : req->iv,
					   op_type);
		goto error;
	}
	reqctx->op = op_type;
	reqctx->srcsg = req->src;
	reqctx->dstsg = req->dst;
	reqctx->src_ofst = 0;
	reqctx->dst_ofst = 0;
	wrparam.qid = qid;
	wrparam.req = req;
	wrparam.bytes = bytes;
	*skb = create_cipher_wr(&wrparam);
	if (IS_ERR(*skb)) {
		err = PTR_ERR(*skb);
		goto unmap;
	}
	reqctx->processed = bytes;
	reqctx->last_req_len = bytes;
	reqctx->partial_req = !!(req->cryptlen - reqctx->processed);

	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
error:
	return err;
}
static int chcr_aes_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	struct sk_buff *skb = NULL;
	int err;
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct chcr_context *ctx = c_ctx(tfm);
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	err = chcr_inc_wrcount(dev);
	if (err)
		return -ENXIO;
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    reqctx->txqidx) &&
		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
			err = -ENOSPC;
			goto error;
	}

	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
			     &skb, CHCR_ENCRYPT_OP);
	if (err || !skb)
		return err;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
	    CRYPTO_TFM_REQ_MAY_SLEEP) {
		reqctx->partial_req = 1;
		wait_for_completion(&ctx->cbc_aes_aio_done);
	}
	return -EINPROGRESS;
error:
	chcr_dec_wrcount(dev);
	return err;
}
static int chcr_aes_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	struct sk_buff *skb = NULL;
	int err;
	struct chcr_context *ctx = c_ctx(tfm);
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	err = chcr_inc_wrcount(dev);
	if (err)
		return -ENXIO;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    reqctx->txqidx) &&
		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))))
			return -ENOSPC;
	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
			     &skb, CHCR_DECRYPT_OP);
	if (err || !skb)
		return err;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}
static int chcr_device_init(struct chcr_context *ctx)
{
	struct uld_ctx *u_ctx = NULL;
	int txq_perchan, ntxq;
	int err = 0, rxq_perchan;

	if (!ctx->dev) {
		u_ctx = assign_chcr_device();
		if (!u_ctx) {
			err = -ENXIO;
			pr_err("chcr device assignment fails\n");
			goto out;
		}
		ctx->dev = &u_ctx->dev;
		ntxq = u_ctx->lldi.ntxq;
		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
		txq_perchan = ntxq / u_ctx->lldi.nchan;
		ctx->ntxq = ntxq;
		ctx->nrxq = u_ctx->lldi.nrxq;
		ctx->rxq_perchan = rxq_perchan;
		ctx->txq_perchan = txq_perchan;
	}
out:
	return err;
}
static int chcr_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	ablkctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0,
						   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}
	init_completion(&ctx->cbc_aes_aio_done);
	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
				    crypto_skcipher_reqsize(ablkctx->sw_cipher));

	return chcr_device_init(ctx);
}
static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	/* RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
	 * cannot be used as the fallback in chcr_handle_cipher_resp()
	 */
	ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
						   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}
	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
				    crypto_skcipher_reqsize(ablkctx->sw_cipher));
	return chcr_device_init(ctx);
}
static void chcr_exit_tfm(struct crypto_skcipher *tfm)
{
	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	crypto_free_skcipher(ablkctx->sw_cipher);
}
static int get_alg_config(struct algo_param *params,
			  unsigned int auth_size)
{
	switch (auth_size) {
	case SHA1_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
		params->result_size = SHA1_DIGEST_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	default:
		pr_err("ERROR, unsupported digest size\n");
		return -EINVAL;
	}
	return 0;
}
static inline void chcr_free_shash(struct crypto_shash *base_hash)
{
	crypto_free_shash(base_hash);
}
/**
 *	create_hash_wr - Create hash work request
 *	@req: Cipher req base
 *	@param: Container for create_hash_wr()'s parameters
 */
static struct sk_buff *create_hash_wr(struct ahash_request *req,
				      struct hash_wr_param *param)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = h_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	struct sk_buff *skb = NULL;
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct chcr_wr *chcr_req;
	struct ulptx_sgl *ulptx;
	unsigned int nents = 0, transhdr_len;
	unsigned int temp = 0;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(h_ctx(tfm)->dev);
	int error = 0;
	unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;

	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
				param->sg_len) <= SGE_MAX_WR_LEN;
	nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
			      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
	nents += param->bfr_len ? 1 : 0;
	transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
				param->sg_len, 16) : (sgl_len(nents) * 8);
	transhdr_len = roundup(transhdr_len, 16);

	skb = alloc_skb(transhdr_len, flags);
	if (!skb)
		return ERR_PTR(-ENOMEM);
	chcr_req = __skb_put_zero(skb, transhdr_len);

	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 0);

	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);

	chcr_req->sec_cpl.aadstart_cipherstop_hi =
		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
	chcr_req->sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
					 param->opad_needed, 0);

	chcr_req->sec_cpl.ivgen_hdrlen =
		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);

	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
	       param->alg_prm.result_size);

	if (param->opad_needed)
		memcpy(chcr_req->key_ctx.key +
		       ((param->alg_prm.result_size <= 32) ? 32 :
			CHCR_HASH_MAX_DIGEST_SIZE),
		       hmacctx->opad, param->alg_prm.result_size);

	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
						     param->alg_prm.mk_size, 0,
						     param->opad_needed,
						     ((param->kctx_len +
						      sizeof(chcr_req->key_ctx)) >> 4));
	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
				     DUMMY_BYTES);
	if (param->bfr_len != 0) {
		req_ctx->hctx_wr.dma_addr =
			dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
				       param->bfr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
				      req_ctx->hctx_wr.dma_addr)) {
			error = -ENOMEM;
			goto err;
		}
		req_ctx->hctx_wr.dma_len = param->bfr_len;
	} else {
		req_ctx->hctx_wr.dma_addr = 0;
	}
	chcr_add_hash_src_ent(req, ulptx, param);
	/* Request up to the max WR size */
	temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
			(param->sg_len + param->bfr_len) : 0);
	atomic_inc(&adap->chcr_stats.digest_rqst);
	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
		    param->hash_size, transhdr_len,
		    temp, 0);
	req_ctx->hctx_wr.skb = skb;
	return skb;
err:
	kfree_skb(skb);
	return ERR_PTR(error);
}
static int chcr_ahash_update(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
	struct chcr_context *ctx = h_ctx(rtfm);
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct sk_buff *skb;
	u8 remainder = 0, bs;
	unsigned int nbytes = req->nbytes;
	struct hash_wr_param params;
	int error;
	unsigned int cpu;

	cpu = get_cpu();
	req_ctx->txqidx = cpu % ctx->ntxq;
	req_ctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	if (nbytes + req_ctx->reqlen >= bs) {
		remainder = (nbytes + req_ctx->reqlen) % bs;
		nbytes = nbytes + req_ctx->reqlen - remainder;
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
				   + req_ctx->reqlen, nbytes, 0);
		req_ctx->reqlen += nbytes;
		return 0;
	}
	error = chcr_inc_wrcount(dev);
	if (error)
		return -ENXIO;
	/* Detach state for CHCR means lldi or padap is freed. Increasing
	 * the inflight count for dev guarantees that lldi and padap remain
	 * valid.
	 */
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    req_ctx->txqidx) &&
		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
			error = -ENOSPC;
			goto err;
	}

	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error) {
		error = -ENOMEM;
		goto err;
	}
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
					    HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len > req->nbytes)
		params.sg_len = req->nbytes;
	params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
			req_ctx->reqlen;
	params.opad_needed = 0;
	params.more = 1;
	params.last = 0;
	params.bfr_len = req_ctx->reqlen;
	params.scmd1 = 0;
	req_ctx->hctx_wr.srcsg = req->src;

	params.hash_size = params.alg_prm.result_size;
	req_ctx->data_len += params.sg_len + params.bfr_len;
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}

	req_ctx->hctx_wr.processed += params.sg_len;
	if (remainder) {
		/* Swap buffers */
		swap(req_ctx->reqbfr, req_ctx->skbfr);
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   req_ctx->reqbfr, remainder, req->nbytes -
				   remainder);
	}
	req_ctx->reqlen = remainder;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
err:
	chcr_dec_wrcount(dev);
	return error;
}
static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
{
	memset(bfr_ptr, 0, bs);
	*bfr_ptr = 0x80;
	if (bs == 64)
		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
	else
		*(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
}
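
/*
 * Illustrative sketch (not part of the driver, compiled out): the helper
 * above builds the final Merkle-Damgard padding block by hand: a 0x80
 * byte, zeroes, then the message length in *bits* in the last 8 bytes
 * (offset 56 for 64-byte blocks, 120 for 128-byte blocks). For an empty
 * SHA-256 message the block is 0x80 followed by 63 zero bytes:
 */
#if 0
static void example_empty_sha256_pad(void)
{
	char block[SHA256_BLOCK_SIZE];

	create_last_hash_block(block, SHA256_BLOCK_SIZE, 0);
	/* block[0] == 0x80, block[1..63] == 0, encoded bit length == 0 */
}
#endif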
static int chcr_ahash_final(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct hash_wr_param params;
	struct sk_buff *skb;
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
	struct chcr_context *ctx = h_ctx(rtfm);
	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	int error;
	unsigned int cpu;

	cpu = get_cpu();
	req_ctx->txqidx = cpu % ctx->ntxq;
	req_ctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	error = chcr_inc_wrcount(dev);
	if (error)
		return -ENXIO;

	chcr_init_hctx_per_wr(req_ctx);
	if (is_hmac(crypto_ahash_tfm(rtfm)))
		params.opad_needed = 1;
	else
		params.opad_needed = 0;
	params.sg_len = 0;
	req_ctx->hctx_wr.isfinal = 1;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.opad_needed = 1;
		params.kctx_len *= 2;
	} else {
		params.opad_needed = 0;
	}

	req_ctx->hctx_wr.result = 1;
	params.bfr_len = req_ctx->reqlen;
	req_ctx->data_len += params.bfr_len + params.sg_len;
	req_ctx->hctx_wr.srcsg = req->src;
	if (req_ctx->reqlen == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;

	} else {
		params.scmd1 = req_ctx->data_len;
		params.last = 1;
		params.more = 0;
	}
	params.hash_size = crypto_ahash_digestsize(rtfm);
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto err;
	}
	req_ctx->reqlen = 0;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
err:
	chcr_dec_wrcount(dev);
	return error;
}
static int chcr_ahash_finup(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
	struct chcr_context *ctx = h_ctx(rtfm);
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;
	int error;
	unsigned int cpu;

	cpu = get_cpu();
	req_ctx->txqidx = cpu % ctx->ntxq;
	req_ctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	error = chcr_inc_wrcount(dev);
	if (error)
		return -ENXIO;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    req_ctx->txqidx) &&
		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
			error = -ENOSPC;
			goto err;
	}
	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error) {
		error = -ENOMEM;
		goto err;
	}

	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.kctx_len *= 2;
		params.opad_needed = 1;
	} else {
		params.opad_needed = 0;
	}

	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
					    HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len < req->nbytes) {
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
			params.kctx_len /= 2;
			params.opad_needed = 0;
		}
		params.last = 0;
		params.more = 1;
		params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
				- req_ctx->reqlen;
		params.hash_size = params.alg_prm.result_size;
		params.scmd1 = 0;
	} else {
		params.last = 1;
		params.more = 0;
		params.sg_len = req->nbytes;
		params.hash_size = crypto_ahash_digestsize(rtfm);
		params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
			       req->nbytes;
	}
	params.bfr_len = req_ctx->reqlen;
	req_ctx->data_len += params.bfr_len + params.sg_len;
	req_ctx->hctx_wr.result = 1;
	req_ctx->hctx_wr.srcsg = req->src;
	if ((req_ctx->reqlen + req->nbytes) == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;
	}
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}
	req_ctx->reqlen = 0;
	req_ctx->hctx_wr.processed += params.sg_len;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
err:
	chcr_dec_wrcount(dev);
	return error;
}
static int chcr_ahash_digest(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
	struct chcr_context *ctx = h_ctx(rtfm);
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;
	int error;
	unsigned int cpu;

	cpu = get_cpu();
	req_ctx->txqidx = cpu % ctx->ntxq;
	req_ctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	rtfm->init(req);
	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	error = chcr_inc_wrcount(dev);
	if (error)
		return -ENXIO;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    req_ctx->txqidx) &&
		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
			error = -ENOSPC;
			goto err;
	}

	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error) {
		error = -ENOMEM;
		goto err;
	}

	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.kctx_len *= 2;
		params.opad_needed = 1;
	} else {
		params.opad_needed = 0;
	}
	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
					    HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len < req->nbytes) {
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
			params.kctx_len /= 2;
			params.opad_needed = 0;
		}
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.sg_len = rounddown(params.sg_len, bs);
		params.hash_size = params.alg_prm.result_size;
	} else {
		params.sg_len = req->nbytes;
		params.hash_size = crypto_ahash_digestsize(rtfm);
		params.last = 1;
		params.more = 0;
		params.scmd1 = req->nbytes + req_ctx->data_len;
	}
	params.bfr_len = 0;
	req_ctx->hctx_wr.result = 1;
	req_ctx->hctx_wr.srcsg = req->src;
	req_ctx->data_len += params.bfr_len + params.sg_len;

	if (req->nbytes == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.more = 1;
		params.bfr_len = bs;
	}

	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}
	req_ctx->hctx_wr.processed += params.sg_len;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
err:
	chcr_dec_wrcount(dev);
	return error;
}
static int chcr_ahash_continue(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = h_ctx(rtfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;
	int error;
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.kctx_len *= 2;
		params.opad_needed = 1;
	} else {
		params.opad_needed = 0;
	}
	params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
					    HASH_SPACE_LEFT(params.kctx_len),
					    hctx_wr->src_ofst);
	if ((params.sg_len + hctx_wr->processed) > req->nbytes)
		params.sg_len = req->nbytes - hctx_wr->processed;
	if (!hctx_wr->result ||
	    ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
			params.kctx_len /= 2;
			params.opad_needed = 0;
		}
		params.last = 0;
		params.more = 1;
		params.sg_len = rounddown(params.sg_len, bs);
		params.hash_size = params.alg_prm.result_size;
		params.scmd1 = 0;
	} else {
		params.last = 1;
		params.more = 0;
		params.hash_size = crypto_ahash_digestsize(rtfm);
		params.scmd1 = reqctx->data_len + params.sg_len;
	}
	params.bfr_len = 0;
	reqctx->data_len += params.sg_len;
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto err;
	}
	hctx_wr->processed += params.sg_len;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	return 0;
err:
	return error;
}
static inline void chcr_handle_ahash_resp(struct ahash_request *req,
					  unsigned char *input,
					  int err)
{
	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
	int digestsize, updated_digestsize;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
	struct chcr_dev *dev = h_ctx(tfm)->dev;

	if (input == NULL)
		goto out;
	digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	updated_digestsize = digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		updated_digestsize = SHA256_DIGEST_SIZE;
	else if (digestsize == SHA384_DIGEST_SIZE)
		updated_digestsize = SHA512_DIGEST_SIZE;

	if (hctx_wr->dma_addr) {
		dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
				 hctx_wr->dma_len, DMA_TO_DEVICE);
		hctx_wr->dma_addr = 0;
	}
	if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
				 req->nbytes)) {
		if (hctx_wr->result == 1) {
			hctx_wr->result = 0;
			memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
			       digestsize);
		} else {
			memcpy(reqctx->partial_hash,
			       input + sizeof(struct cpl_fw6_pld),
			       updated_digestsize);

		}
		goto unmap;
	}
	memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
	       updated_digestsize);

	err = chcr_ahash_continue(req);
	if (err)
		goto unmap;
	return;
unmap:
	if (hctx_wr->is_sg_map)
		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);

out:
	chcr_dec_wrcount(dev);
	req->base.complete(&req->base, err);
}
/*
 *	chcr_handle_resp - Unmap the DMA buffers associated with the request
 *	@req: crypto request
 */
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
		     int err)
{
	struct crypto_tfm *tfm = req->tfm;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct adapter *adap = padap(ctx->dev);

	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
		err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
		break;

	case CRYPTO_ALG_TYPE_SKCIPHER:
		chcr_handle_cipher_resp(skcipher_request_cast(req),
					input, err);
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
	}
	atomic_inc(&adap->chcr_stats.complete);
	return err;
}
static int chcr_ahash_export(struct ahash_request *areq, void *out)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct chcr_ahash_req_ctx *state = out;

	state->reqlen = req_ctx->reqlen;
	state->data_len = req_ctx->data_len;
	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
	memcpy(state->partial_hash, req_ctx->partial_hash,
	       CHCR_HASH_MAX_DIGEST_SIZE);
	chcr_init_hctx_per_wr(state);
	return 0;
}

static int chcr_ahash_import(struct ahash_request *areq, const void *in)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;

	req_ctx->reqlen = state->reqlen;
	req_ctx->data_len = state->data_len;
	req_ctx->reqbfr = req_ctx->bfr1;
	req_ctx->skbfr = req_ctx->bfr2;
	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
	memcpy(req_ctx->partial_hash, state->partial_hash,
	       CHCR_HASH_MAX_DIGEST_SIZE);
	chcr_init_hctx_per_wr(req_ctx);
	return 0;
}
static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int i, err = 0, updated_digestsize;

	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);

	/* Use the key to calculate the ipad and opad. The ipad will be sent
	 * with the first request's data; the opad will be sent with the
	 * final hash result. The ipad lives in hmacctx->ipad and the opad
	 * in hmacctx->opad.
	 */
	shash->tfm = hmacctx->base_hash;
	if (keylen > bs) {
		err = crypto_shash_digest(shash, key, keylen,
					  hmacctx->ipad);
		if (err)
			goto out;
		keylen = digestsize;
	} else {
		memcpy(hmacctx->ipad, key, keylen);
	}
	memset(hmacctx->ipad + keylen, 0, bs - keylen);
	memcpy(hmacctx->opad, hmacctx->ipad, bs);

	for (i = 0; i < bs / sizeof(int); i++) {
		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
	}

	updated_digestsize = digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		updated_digestsize = SHA256_DIGEST_SIZE;
	else if (digestsize == SHA384_DIGEST_SIZE)
		updated_digestsize = SHA512_DIGEST_SIZE;
	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
					hmacctx->ipad, digestsize);
	if (err)
		goto out;
	chcr_change_order(hmacctx->ipad, updated_digestsize);

	err = chcr_compute_partial_hash(shash, hmacctx->opad,
					hmacctx->opad, digestsize);
	if (err)
		goto out;
	chcr_change_order(hmacctx->opad, updated_digestsize);
out:
	return err;
}
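
/*
 * Illustrative sketch (not part of the driver, compiled out): the setkey
 * above uses the standard HMAC precomputation, HMAC(K, m) =
 * H((K ^ opad) || H((K ^ ipad) || m)). Hashing the two xored key blocks
 * once and exporting the chaining state lets the engine resume from those
 * partial states instead of re-hashing the key on every request. The
 * padding/xor step alone, written out for one block size
 * ("example_hmac_pads" is a hypothetical name):
 */
#if 0
static void example_hmac_pads(const u8 *key, unsigned int keylen,
			      u8 ipad[SHA256_BLOCK_SIZE],
			      u8 opad[SHA256_BLOCK_SIZE])
{
	int i;

	/* Keys longer than one block are first hashed down by the caller. */
	memset(ipad, 0, SHA256_BLOCK_SIZE);
	memcpy(ipad, key, keylen);
	memcpy(opad, ipad, SHA256_BLOCK_SIZE);
	for (i = 0; i < SHA256_BLOCK_SIZE; i++) {
		ipad[i] ^= 0x36;	/* IPAD_DATA, byte-wise */
		opad[i] ^= 0x5c;	/* OPAD_DATA, byte-wise */
	}
}
#endif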
static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
			       unsigned int key_len)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned short context_size = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, key_len);
	if (err)
		goto badkey_err;

	memcpy(ablkctx->key, key, key_len);
	ablkctx->enckey_len = key_len;
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
	/* Both keys for xts must be aligned to a 16-byte boundary
	 * by padding with zeros, so 24-byte keys each get 8 zero bytes.
	 */
	if (key_len == 48) {
		context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len
				+ 16) >> 4;
		memmove(ablkctx->key + 32, ablkctx->key + 24, 24);
		memset(ablkctx->key + 24, 0, 8);
		memset(ablkctx->key + 56, 0, 8);
		ablkctx->enckey_len = 64;
		ablkctx->key_ctx_hdr =
			FILL_KEY_CTX_HDR(CHCR_KEYCTX_CIPHER_KEY_SIZE_192,
					 CHCR_KEYCTX_NO_KEY, 1,
					 0, context_size);
	} else {
		ablkctx->key_ctx_hdr =
			FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
					 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
					 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
					 CHCR_KEYCTX_NO_KEY, 1,
					 0, context_size);
	}
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
	return 0;
badkey_err:
	ablkctx->enckey_len = 0;
	return err;
}
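/*
 * Layout note (illustrative, inferred from the memmove/memset above): a
 * 48-byte xts(aes) key carries two 24-byte (AES-192) halves, but the key
 * context wants each half padded to 32 bytes:
 *
 *   input:    | key1 (24) | key2 (24) |
 *   context:  | key1 (24) | 0 x 8 | key2 (24) | 0 x 8 |   = 64 bytes
 */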
static int chcr_sha_init(struct ahash_request *areq)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	int digestsize = crypto_ahash_digestsize(tfm);

	req_ctx->data_len = 0;
	req_ctx->reqlen = 0;
	req_ctx->reqbfr = req_ctx->bfr1;
	req_ctx->skbfr = req_ctx->bfr2;
	copy_hash_init_values(req_ctx->partial_hash, digestsize);

	return 0;
}

static int chcr_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct chcr_ahash_req_ctx));
	return chcr_device_init(crypto_tfm_ctx(tfm));
}
static int chcr_hmac_init(struct ahash_request *areq)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	chcr_sha_init(areq);
	req_ctx->data_len = bs;
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		if (digestsize == SHA224_DIGEST_SIZE)
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       SHA256_DIGEST_SIZE);
		else if (digestsize == SHA384_DIGEST_SIZE)
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       SHA512_DIGEST_SIZE);
		else
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       digestsize);
	}
	return 0;
}
static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	unsigned int digestsize =
		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct chcr_ahash_req_ctx));
	hmacctx->base_hash = chcr_alloc_shash(digestsize);
	if (IS_ERR(hmacctx->base_hash))
		return PTR_ERR(hmacctx->base_hash);
	return chcr_device_init(crypto_tfm_ctx(tfm));
}

static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);

	if (hmacctx->base_hash) {
		chcr_free_shash(hmacctx->base_hash);
		hmacctx->base_hash = NULL;
	}
}
inline void chcr_aead_common_exit(struct aead_request *req)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));

	chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
}

static int chcr_aead_common_init(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;

	/* validate key size */
	if (aeadctx->enckey_len == 0)
		goto err;
	if (reqctx->op && req->cryptlen < authsize)
		goto err;
	if (reqctx->b0_len)
		reqctx->scratch_pad = reqctx->iv + IV;
	else
		reqctx->scratch_pad = NULL;

	error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
				  reqctx->op);
	if (error) {
		error = -ENOMEM;
		goto err;
	}

	return 0;
err:
	return error;
}
static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
				   int aadmax, int wrlen,
				   unsigned short op_type)
{
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));

	if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
	    dst_nents > MAX_DSGL_ENT ||
	    (req->assoclen > aadmax) ||
	    (wrlen > SGE_MAX_WR_LEN))
		return 1;
	return 0;
}

static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct aead_request *subreq = aead_request_ctx(req);

	aead_request_set_tfm(subreq, aeadctx->sw_cipher);
	aead_request_set_callback(subreq, req->base.flags,
				  req->base.complete, req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
			       req->iv);
	aead_request_set_ad(subreq, req->assoclen);
	return op_type ? crypto_aead_decrypt(subreq) :
			 crypto_aead_encrypt(subreq);
}
static struct sk_buff *create_authenc_wr(struct aead_request *req,
					 unsigned short qid,
					 int size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_context *ctx = a_ctx(tfm);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	unsigned int transhdr_len;
	unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
	unsigned int kctx_len = 0, dnents, snents;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;
	u8 *ivptr;
	int null = 0;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(ctx->dev);
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;

	if (req->cryptlen == 0)
		return ERR_PTR(-EINVAL);

	reqctx->b0_len = 0;
	error = chcr_aead_common_init(req);
	if (error)
		return ERR_PTR(error);

	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		null = 1;
	}
	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
		(reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
	dnents += MIN_AUTH_SG; // For IV
	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
			       CHCR_SRC_SG_SIZE, 0);
	dst_size = get_space_for_phys_dsgl(dnents);
	kctx_len = (KEY_CONTEXT_CTX_LEN_G(ntohl(aeadctx->key_ctx_hdr)) << 4)
		- sizeof(chcr_req->key_ctx);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
			SGE_MAX_WR_LEN;
	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
			: (sgl_len(snents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);

	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
				    transhdr_len, reqctx->op)) {
		atomic_inc(&adap->chcr_stats.fallback);
		chcr_aead_common_exit(req);
		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
	}
	skb = alloc_skb(transhdr_len, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	chcr_req = __skb_put_zero(skb, transhdr_len);

	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;

	/*
	 * Input order is AAD, IV and payload. The IV should be included as
	 * part of the authdata. All other fields should be filled according
	 * to the hardware spec.
	 */
	chcr_req->sec_cpl.op_ivinsrtofst =
			FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
	chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
					null ? 0 : 1 + IV,
					null ? 0 : IV + req->assoclen,
					req->assoclen + IV + 1,
					(temp & 0x1F0) >> 4);
	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
					temp & 0xF,
					null ? 0 : req->assoclen + IV + 1,
					temp, temp);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
		temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
	else
		temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
					(reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
					temp,
					actx->auth_mode, aeadctx->hmac_ctrl,
					IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
					 0, 0, dst_size);

	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	if (reqctx->op == CHCR_ENCRYPT_OP ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
		memcpy(chcr_req->key_ctx.key, aeadctx->key,
		       aeadctx->enckey_len);
	else
		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
		       aeadctx->enckey_len);

	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
	       actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
	ulptx = (struct ulptx_sgl *)(ivptr + IV);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
		memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
		       CTR_RFC3686_IV_SIZE);
		*(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
			    CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
	} else {
		memcpy(ivptr, req->iv, IV);
	}
	chcr_add_aead_dst_ent(req, phys_cpl, qid);
	chcr_add_aead_src_ent(req, ulptx);
	atomic_inc(&adap->chcr_stats.cipher_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
		    transhdr_len, temp, 0);

	return skb;
err:
	chcr_aead_common_exit(req);

	return ERR_PTR(error);
}
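/*
 * Sizing note (illustrative sketch, not driver code): the authenc, CCM and
 * GCM WR builders all make the same immediate-vs-SGL decision. When the
 * whole payload fits in one work request (transhdr_len + AAD + payload
 * within SGE_MAX_WR_LEN) it is copied inline and padded to 16 bytes;
 * otherwise only an ULPTX SGL referencing the DMA-mapped source goes in.
 * The helper name is hypothetical; roundup() and sgl_len() are the same
 * helpers used above.
 */
static inline unsigned int example_wr_data_len(unsigned int payload,
					       unsigned int snents, bool imm)
{
	/* Inline data costs roundup(payload, 16) bytes; an SGL costs
	 * sgl_len(snents) * 8 bytes, mirroring the temp computation above.
	 */
	return imm ? roundup(payload, 16) : sgl_len(snents) * 8;
}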
int chcr_aead_dma_map(struct device *dev,
		      struct aead_request *req,
		      unsigned short op_type)
{
	int error = -EINVAL;
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(tfm);
	int src_len, dst_len;

	/* calculate and handle the src and dst sg lengths separately
	 * for in-place and out-of-place operations
	 */
	if (req->src == req->dst) {
		src_len = req->assoclen + req->cryptlen + (op_type ?
							   0 : authsize);
		dst_len = src_len;
	} else {
		src_len = req->assoclen + req->cryptlen;
		dst_len = req->assoclen + req->cryptlen + (op_type ?
							   -authsize : authsize);
	}

	if (!req->cryptlen || !src_len || !dst_len)
		return 0;
	reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, reqctx->iv_dma))
		return -ENOMEM;
	if (reqctx->b0_len)
		reqctx->b0_dma = reqctx->iv_dma + IV;
	else
		reqctx->b0_dma = 0;
	if (req->src == req->dst) {
		error = dma_map_sg(dev, req->src,
				   sg_nents_for_len(req->src, src_len),
				   DMA_BIDIRECTIONAL);
		if (!error)
			goto err;
	} else {
		error = dma_map_sg(dev, req->src,
				   sg_nents_for_len(req->src, src_len),
				   DMA_TO_DEVICE);
		if (!error)
			goto err;
		error = dma_map_sg(dev, req->dst,
				   sg_nents_for_len(req->dst, dst_len),
				   DMA_FROM_DEVICE);
		if (!error) {
			dma_unmap_sg(dev, req->src,
				     sg_nents_for_len(req->src, src_len),
				     DMA_TO_DEVICE);
			goto err;
		}
	}

	return 0;
err:
	dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
	return -ENOMEM;
}
void chcr_aead_dma_unmap(struct device *dev,
			 struct aead_request *req,
			 unsigned short op_type)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(tfm);
	int src_len, dst_len;

	/* calculate and handle the src and dst sg lengths separately
	 * for in-place and out-of-place operations
	 */
	if (req->src == req->dst) {
		src_len = req->assoclen + req->cryptlen + (op_type ?
							   0 : authsize);
		dst_len = src_len;
	} else {
		src_len = req->assoclen + req->cryptlen;
		dst_len = req->assoclen + req->cryptlen + (op_type ?
							   -authsize : authsize);
	}

	if (!req->cryptlen || !src_len || !dst_len)
		return;

	dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
			 DMA_BIDIRECTIONAL);
	if (req->src == req->dst) {
		dma_unmap_sg(dev, req->src,
			     sg_nents_for_len(req->src, src_len),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(dev, req->src,
			     sg_nents_for_len(req->src, src_len),
			     DMA_TO_DEVICE);
		dma_unmap_sg(dev, req->dst,
			     sg_nents_for_len(req->dst, dst_len),
			     DMA_FROM_DEVICE);
	}
}
void chcr_add_aead_src_ent(struct aead_request *req,
			   struct ulptx_sgl *ulptx)
{
	struct ulptx_walk ulp_walk;
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);

	if (reqctx->imm) {
		u8 *buf = (u8 *)ulptx;

		if (reqctx->b0_len) {
			memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
			buf += reqctx->b0_len;
		}
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   buf, req->cryptlen + req->assoclen, 0);
	} else {
		ulptx_walk_init(&ulp_walk, ulptx);
		if (reqctx->b0_len)
			ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
					    reqctx->b0_dma);
		ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
				  req->assoclen, 0);
		ulptx_walk_end(&ulp_walk);
	}
}

void chcr_add_aead_dst_ent(struct aead_request *req,
			   struct cpl_rx_phys_dsgl *phys_cpl,
			   unsigned short qid)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct dsgl_walk dsgl_walk;
	unsigned int authsize = crypto_aead_authsize(tfm);
	struct chcr_context *ctx = a_ctx(tfm);
	u32 temp;
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;

	dsgl_walk_init(&dsgl_walk, phys_cpl);
	dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
	temp = req->assoclen + req->cryptlen +
		(reqctx->op ? -authsize : authsize);
	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
	dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
}
void chcr_add_cipher_src_ent(struct skcipher_request *req,
			     void *ulptx,
			     struct cipher_wr_param *wrparam)
{
	struct ulptx_walk ulp_walk;
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	u8 *buf = ulptx;

	memcpy(buf, reqctx->iv, IV);
	buf += IV;
	if (reqctx->imm) {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   buf, wrparam->bytes, reqctx->processed);
	} else {
		ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
		ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
				  reqctx->src_ofst);
		reqctx->srcsg = ulp_walk.last_sg;
		reqctx->src_ofst = ulp_walk.last_sg_len;
		ulptx_walk_end(&ulp_walk);
	}
}

void chcr_add_cipher_dst_ent(struct skcipher_request *req,
			     struct cpl_rx_phys_dsgl *phys_cpl,
			     struct cipher_wr_param *wrparam,
			     unsigned short qid)
{
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
	struct chcr_context *ctx = c_ctx(tfm);
	struct dsgl_walk dsgl_walk;
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;

	dsgl_walk_init(&dsgl_walk, phys_cpl);
	dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
			 reqctx->dst_ofst);
	reqctx->dstsg = dsgl_walk.last_sg;
	reqctx->dst_ofst = dsgl_walk.last_sg_len;
	dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
}
void chcr_add_hash_src_ent(struct ahash_request *req,
			   struct ulptx_sgl *ulptx,
			   struct hash_wr_param *param)
{
	struct ulptx_walk ulp_walk;
	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);

	if (reqctx->hctx_wr.imm) {
		u8 *buf = (u8 *)ulptx;

		if (param->bfr_len) {
			memcpy(buf, reqctx->reqbfr, param->bfr_len);
			buf += param->bfr_len;
		}

		sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
				   sg_nents(reqctx->hctx_wr.srcsg), buf,
				   param->sg_len, 0);
	} else {
		ulptx_walk_init(&ulp_walk, ulptx);
		if (param->bfr_len)
			ulptx_walk_add_page(&ulp_walk, param->bfr_len,
					    reqctx->hctx_wr.dma_addr);
		ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
				  param->sg_len, reqctx->hctx_wr.src_ofst);
		reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
		reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
		ulptx_walk_end(&ulp_walk);
	}
}

int chcr_hash_dma_map(struct device *dev,
		      struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	int error = 0;

	if (!req->nbytes)
		return 0;
	error = dma_map_sg(dev, req->src, sg_nents(req->src),
			   DMA_TO_DEVICE);
	if (!error)
		return -ENOMEM;
	req_ctx->hctx_wr.is_sg_map = 1;
	return 0;
}

void chcr_hash_dma_unmap(struct device *dev,
			 struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return;

	dma_unmap_sg(dev, req->src, sg_nents(req->src),
		     DMA_TO_DEVICE);
	req_ctx->hctx_wr.is_sg_map = 0;
}
int chcr_cipher_dma_map(struct device *dev,
			struct skcipher_request *req)
{
	int error;

	if (req->src == req->dst) {
		error = dma_map_sg(dev, req->src, sg_nents(req->src),
				   DMA_BIDIRECTIONAL);
		if (!error)
			goto err;
	} else {
		error = dma_map_sg(dev, req->src, sg_nents(req->src),
				   DMA_TO_DEVICE);
		if (!error)
			goto err;
		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
				   DMA_FROM_DEVICE);
		if (!error) {
			dma_unmap_sg(dev, req->src, sg_nents(req->src),
				     DMA_TO_DEVICE);
			goto err;
		}
	}

	return 0;
err:
	return -ENOMEM;
}

void chcr_cipher_dma_unmap(struct device *dev,
			   struct skcipher_request *req)
{
	if (req->src == req->dst) {
		dma_unmap_sg(dev, req->src, sg_nents(req->src),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(dev, req->src, sg_nents(req->src),
			     DMA_TO_DEVICE);
		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
			     DMA_FROM_DEVICE);
	}
}
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (unsigned int)(1 << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}
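/*
 * Worked example for set_msg_len() (illustrative): with msglen = 0x1234 and
 * csize = 2, the two bytes at block[0..1] are first zeroed, then the two
 * low-order bytes of the big-endian length are copied back in, leaving
 * block[0] = 0x12 and block[1] = 0x34.
 */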
static int generate_b0(struct aead_request *req, u8 *ivptr,
		       unsigned short op_type)
{
	unsigned int l, lp, m;
	int rc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	u8 *b0 = reqctx->scratch_pad;

	m = crypto_aead_authsize(aead);

	memcpy(b0, ivptr, 16);

	lp = b0[0];
	l = lp + 1;

	/* set m, bits 3-5 */
	*b0 |= (8 * ((m - 2) / 2));

	/* set adata, bit 6, if associated data is used */
	if (req->assoclen)
		*b0 |= 64;
	rc = set_msg_len(b0 + 16 - l,
			 (op_type == CHCR_DECRYPT_OP) ?
			 req->cryptlen - m : req->cryptlen, l);

	return rc;
}
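/*
 * Illustrative layout (RFC 3610, not driver code) of the B0 block built
 * above:
 *
 *   byte 0:          flags = [ 0 | Adata | (M-2)/2 (3 bits) | L-1 (3 bits) ]
 *   bytes 1..15-L:   nonce copied from the IV
 *   bytes 16-L..15:  message length, big endian (set_msg_len() above)
 */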
static inline int crypto_ccm_check_iv(const u8 *iv)
{
	/* 2 <= L <= 8, so 1 <= L' <= 7. */
	if (iv[0] < 1 || iv[0] > 7)
		return -EINVAL;

	return 0;
}
static int ccm_format_packet(struct aead_request *req,
			     u8 *ivptr,
			     unsigned int sub_type,
			     unsigned short op_type,
			     unsigned int assoclen)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	int rc = 0;

	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
		ivptr[0] = 3;
		memcpy(ivptr + 1, &aeadctx->salt[0], 3);
		memcpy(ivptr + 4, req->iv, 8);
		memset(ivptr + 12, 0, 4);
	} else {
		memcpy(ivptr, req->iv, 16);
	}
	if (assoclen)
		put_unaligned_be16(assoclen, &reqctx->scratch_pad[16]);

	rc = generate_b0(req, ivptr, op_type);
	/* zero the ctr value */
	memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
	return rc;
}
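/*
 * Illustrative layout (assumption inferred from the copies above): the
 * 16-byte counter block for rfc4309(ccm(aes)) is
 *
 *   | flags = L'-1 = 3 | salt (3) | ESP IV (8) | counter (4, zeroed) |
 *
 * and the trailing memset clears the low ivptr[0] + 1 counter bytes so CTR
 * mode starts from zero.
 */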
static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
				  unsigned int dst_size,
				  struct aead_request *req,
				  unsigned short op_type)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_context *ctx = a_ctx(tfm);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
	unsigned int ccm_xtra;
	unsigned int tag_offset = 0, auth_offset = 0;
	unsigned int assoclen;

	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
		assoclen = req->assoclen - 8;
	else
		assoclen = req->assoclen;
	ccm_xtra = CCM_B0_SIZE +
		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);

	auth_offset = req->cryptlen ?
		(req->assoclen + IV + 1 + ccm_xtra) : 0;
	if (op_type == CHCR_DECRYPT_OP) {
		if (crypto_aead_authsize(tfm) != req->cryptlen)
			tag_offset = crypto_aead_authsize(tfm);
		else
			auth_offset = 0;
	}

	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
	sec_cpl->pldlen =
		htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
	/* For CCM there will always be a B0 block, so AAD always starts at 1 */
	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
				1 + IV,	IV + assoclen + ccm_xtra,
				req->assoclen + IV + 1 + ccm_xtra, 0);

	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
					auth_offset, tag_offset,
					(op_type == CHCR_ENCRYPT_OP) ? 0 :
					crypto_aead_authsize(tfm));
	sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
					cipher_mode, mac_mode,
					aeadctx->hmac_ctrl, IV >> 1);

	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
							  0, dst_size);
}
static int aead_ccm_validate_input(unsigned short op_type,
				   struct aead_request *req,
				   struct chcr_aead_ctx *aeadctx,
				   unsigned int sub_type)
{
	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
		if (crypto_ccm_check_iv(req->iv)) {
			pr_err("CCM: IV check fails\n");
			return -EINVAL;
		}
	} else {
		if (req->assoclen != 16 && req->assoclen != 20) {
			pr_err("RFC4309: Invalid AAD length %d\n",
			       req->assoclen);
			return -EINVAL;
		}
	}
	return 0;
}
static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
					  unsigned short qid,
					  int size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	unsigned int transhdr_len;
	unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
	unsigned int sub_type, assoclen = req->assoclen;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;
	u8 *ivptr;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(a_ctx(tfm)->dev);

	sub_type = get_aead_subtype(tfm);
	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
		assoclen -= 8;
	reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
	error = chcr_aead_common_init(req);
	if (error)
		return ERR_PTR(error);

	error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
	if (error)
		goto err;
	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
			       + (reqctx->op ? -authsize : authsize),
			       CHCR_DST_SG_SIZE, 0);
	dnents += MIN_CCM_SG; // For IV and B0
	dst_size = get_space_for_phys_dsgl(dnents);
	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
			       CHCR_SRC_SG_SIZE, 0);
	snents += MIN_CCM_SG; // For B0
	kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
		       reqctx->b0_len) <= SGE_MAX_WR_LEN;
	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
				     reqctx->b0_len, 16) :
			(sgl_len(snents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);

	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
				    reqctx->b0_len, transhdr_len, reqctx->op)) {
		atomic_inc(&adap->chcr_stats.fallback);
		chcr_aead_common_exit(req);
		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
	}
	skb = alloc_skb(transhdr_len, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	chcr_req = __skb_put_zero(skb, transhdr_len);

	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);

	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
	       aeadctx->key, aeadctx->enckey_len);

	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
	ulptx = (struct ulptx_sgl *)(ivptr + IV);
	error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
	if (error)
		goto dstmap_fail;
	chcr_add_aead_dst_ent(req, phys_cpl, qid);
	chcr_add_aead_src_ent(req, ulptx);

	atomic_inc(&adap->chcr_stats.aead_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
		reqctx->b0_len) : 0);
	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
		    transhdr_len, temp, 0);

	return skb;
dstmap_fail:
	kfree_skb(skb);
err:
	chcr_aead_common_exit(req);
	return ERR_PTR(error);
}
static struct sk_buff *create_gcm_wr(struct aead_request *req,
				     unsigned short qid,
				     int size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_context *ctx = a_ctx(tfm);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	unsigned int transhdr_len, dnents = 0, snents;
	unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;
	u8 *ivptr;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(ctx->dev);
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;

	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
		assoclen = req->assoclen - 8;

	reqctx->b0_len = 0;
	error = chcr_aead_common_init(req);
	if (error)
		return ERR_PTR(error);
	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
			       (reqctx->op ? -authsize : authsize),
			       CHCR_DST_SG_SIZE, 0);
	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
			       CHCR_SRC_SG_SIZE, 0);
	dnents += MIN_GCM_SG; // For IV
	dst_size = get_space_for_phys_dsgl(dnents);
	kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
			SGE_MAX_WR_LEN;
	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
			(sgl_len(snents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);
	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
				    transhdr_len, reqctx->op)) {
		atomic_inc(&adap->chcr_stats.fallback);
		chcr_aead_common_exit(req);
		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
	}
	skb = alloc_skb(transhdr_len, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	chcr_req = __skb_put_zero(skb, transhdr_len);

	// Offset of tag from end
	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
						rx_channel_id, 2, 1);
	chcr_req->sec_cpl.pldlen =
		htonl(req->assoclen + IV + req->cryptlen);
	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
					assoclen ? 1 + IV : 0,
					assoclen ? IV + assoclen : 0,
					req->assoclen + IV + 1, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
					temp, temp);
	chcr_req->sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
					 CHCR_ENCRYPT_OP) ? 1 : 0,
					 CHCR_SCMD_CIPHER_MODE_AES_GCM,
					 CHCR_SCMD_AUTH_MODE_GHASH,
					 aeadctx->hmac_ctrl, IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
					 0, 0, dst_size);
	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
	       GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);

	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
	/* prepare a 16-byte IV: S A L T | IV | 0x00000001 */
	if (get_aead_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
		memcpy(ivptr, aeadctx->salt, 4);
		memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
	} else {
		memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
	}
	put_unaligned_be32(0x01, &ivptr[12]);
	ulptx = (struct ulptx_sgl *)(ivptr + 16);

	chcr_add_aead_dst_ent(req, phys_cpl, qid);
	chcr_add_aead_src_ent(req, ulptx);
	atomic_inc(&adap->chcr_stats.aead_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
		    transhdr_len, temp, reqctx->verify);

	return skb;
err:
	chcr_aead_common_exit(req);
	return ERR_PTR(error);
}
static int chcr_aead_cra_init(struct crypto_aead *tfm)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct aead_alg *alg = crypto_aead_alg(tfm);

	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
					       CRYPTO_ALG_NEED_FALLBACK |
					       CRYPTO_ALG_ASYNC);
	if (IS_ERR(aeadctx->sw_cipher))
		return PTR_ERR(aeadctx->sw_cipher);
	crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
					 sizeof(struct aead_request) +
					 crypto_aead_reqsize(aeadctx->sw_cipher)));
	return chcr_device_init(a_ctx(tfm));
}

static void chcr_aead_cra_exit(struct crypto_aead *tfm)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	crypto_free_aead(aeadctx->sw_cipher);
}
static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
					 unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
	aeadctx->mayverify = VERIFY_HW;
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}

static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
				    unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	u32 maxauth = crypto_aead_maxauthsize(tfm);

	/* The SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2
	 * does not hold for SHA1, so the authsize == 12 check must come before
	 * authsize == (maxauth >> 1).
	 */
	if (authsize == ICV_4) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_6) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_10) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_12) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_14) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == (maxauth >> 1)) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == maxauth) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
	} else {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_SW;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	switch (authsize) {
	case ICV_4:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_14:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_13:
	case ICV_15:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_SW;
		break;
	default:
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	switch (authsize) {
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	default:
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
				unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	switch (authsize) {
	case ICV_4:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_6:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_10:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_14:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	default:
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_ccm_common_setkey(struct crypto_aead *aead,
				  const u8 *key,
				  unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	unsigned char ck_size, mk_size;
	int key_ctx_size = 0;

	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
	} else {
		aeadctx->enckey_len = 0;
		return -EINVAL;
	}
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
						key_ctx_size >> 4);
	memcpy(aeadctx->key, key, keylen);
	aeadctx->enckey_len = keylen;

	return 0;
}
static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
				const u8 *key,
				unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	int error;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
			      CRYPTO_TFM_REQ_MASK);
	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	if (error)
		return error;
	return chcr_ccm_common_setkey(aead, key, keylen);
}

static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
				    unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	int error;

	if (keylen < 3) {
		aeadctx->enckey_len = 0;
		return -EINVAL;
	}
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
			      CRYPTO_TFM_REQ_MASK);
	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	if (error)
		return error;
	keylen -= 3;
	memcpy(aeadctx->salt, key + keylen, 3);
	return chcr_ccm_common_setkey(aead, key, keylen);
}
static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			   unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
	unsigned int ck_size;
	int ret = 0, key_ctx_size = 0;
	struct crypto_aes_ctx aes;

	aeadctx->enckey_len = 0;
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
			      & CRYPTO_TFM_REQ_MASK);
	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	if (ret)
		goto out;

	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
	    keylen > 3) {
		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
		memcpy(aeadctx->salt, key + keylen, 4);
	}
	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("GCM: Invalid key length %d\n", keylen);
		ret = -EINVAL;
		goto out;
	}

	memcpy(aeadctx->key, key, keylen);
	aeadctx->enckey_len = keylen;
	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
		AEAD_H_SIZE;
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
						CHCR_KEYCTX_MAC_KEY_SIZE_128,
						0, 0,
						key_ctx_size >> 4);
	/* Calculate H = CIPH_K(0 repeated 16 times); it goes into the
	 * key context.
	 */
	ret = aes_expandkey(&aes, key, keylen);
	if (ret) {
		aeadctx->enckey_len = 0;
		goto out;
	}
	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
	aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
	memzero_explicit(&aes, sizeof(aes));
	return 0;
out:
	return ret;
}
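/*
 * Illustrative sketch (not driver code): the GHASH hash key derived above
 * is simply AES_K(0^128). Only the kernel AES library calls already used by
 * chcr_gcm_setkey() appear here; the function name is hypothetical.
 */
static inline int example_ghash_key(const u8 *key, unsigned int keylen,
				    u8 h[16])
{
	struct crypto_aes_ctx aes;
	int ret;

	ret = aes_expandkey(&aes, key, keylen);
	if (ret)
		return ret;
	memset(h, 0, 16);
	aes_encrypt(&aes, h, h);	/* H = E_K(0^128) */
	memzero_explicit(&aes, sizeof(aes));
	return 0;
}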
static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
			       unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* contains both the auth and cipher keys */
	struct crypto_authenc_keys keys;
	unsigned int bs, subtype;
	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
	int err = 0, i, key_ctx_len = 0;
	unsigned char ck_size = 0;
	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
	struct algo_param param;
	int align;
	u8 *o_ptr = NULL;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	if (err)
		goto out;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto out;

	if (get_alg_config(&param, max_authsize)) {
		pr_err("Unsupported digest size\n");
		goto out;
	}
	subtype = get_aead_subtype(authenc);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
			goto out;
		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
		       - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
	}
	if (keys.enckeylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keys.enckeylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("Unsupported cipher key\n");
		goto out;
	}

	/* Copy only the encryption key. We use the authkey to generate
	 * h(ipad) and h(opad), so the authkey is not needed again.
	 * authkeylen is the size of the hash digest.
	 */
	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
	aeadctx->enckey_len = keys.enckeylen;
	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
				    aeadctx->enckey_len << 3);
	}
	base_hash = chcr_alloc_shash(max_authsize);
	if (IS_ERR(base_hash)) {
		pr_err("Base driver cannot be loaded\n");
		goto out;
	}

	SHASH_DESC_ON_STACK(shash, base_hash);

	shash->tfm = base_hash;
	bs = crypto_shash_blocksize(base_hash);
	align = KEYCTX_ALIGN_PAD(max_authsize);
	o_ptr = actx->h_iopad + param.result_size + align;

	if (keys.authkeylen > bs) {
		err = crypto_shash_digest(shash, keys.authkey,
					  keys.authkeylen, o_ptr);
		if (err) {
			pr_err("Base driver cannot be loaded\n");
			goto out;
		}
		keys.authkeylen = max_authsize;
	} else
		memcpy(o_ptr, keys.authkey, keys.authkeylen);

	/* Compute the ipad digest */
	memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
	memcpy(pad, o_ptr, keys.authkeylen);
	for (i = 0; i < bs >> 2; i++)
		*((unsigned int *)pad + i) ^= IPAD_DATA;

	if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
				      max_authsize))
		goto out;
	/* Compute the opad digest */
	memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
	memcpy(pad, o_ptr, keys.authkeylen);
	for (i = 0; i < bs >> 2; i++)
		*((unsigned int *)pad + i) ^= OPAD_DATA;

	if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
		goto out;

	/* convert the ipad and opad digests to network order */
	chcr_change_order(actx->h_iopad, param.result_size);
	chcr_change_order(o_ptr, param.result_size);
	key_ctx_len = sizeof(struct _key_ctx) +
		roundup(keys.enckeylen, 16) +
		(param.result_size + align) * 2;
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
						0, 1, key_ctx_len >> 4);
	actx->auth_mode = param.auth_mode;
	chcr_free_shash(base_hash);

	memzero_explicit(&keys, sizeof(keys));
	return 0;
out:
	aeadctx->enckey_len = 0;
	memzero_explicit(&keys, sizeof(keys));
	if (!IS_ERR(base_hash))
		chcr_free_shash(base_hash);
	return -EINVAL;
}
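/*
 * Key-blob layout note (assumption, based on the format that
 * crypto_authenc_extractkeys() parses in crypto/authenc.h): the key passed
 * to the authenc setkey handlers is not raw key bytes but an rtattr-framed
 * blob:
 *
 *   | rtattr CRYPTO_AUTHENC_KEYA_PARAM { __be32 enckeylen } |
 *   | auth key (remaining bytes - enckeylen) | enc key (enckeylen) |
 */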
static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
					const u8 *key, unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* contains both the auth and cipher keys */
	struct crypto_authenc_keys keys;
	int err;
	unsigned int subtype;
	int key_ctx_len = 0;
	unsigned char ck_size = 0;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	if (err)
		goto out;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto out;

	subtype = get_aead_subtype(authenc);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
			goto out;
		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
		       - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
	}
	if (keys.enckeylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keys.enckeylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("Unsupported cipher key %d\n", keys.enckeylen);
		goto out;
	}
	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
	aeadctx->enckey_len = keys.enckeylen;
	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
				    aeadctx->enckey_len << 3);
	}
	key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);

	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
						0, key_ctx_len >> 4);
	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
	memzero_explicit(&keys, sizeof(keys));
	return 0;
out:
	aeadctx->enckey_len = 0;
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}
static int chcr_aead_op(struct aead_request *req,
			int size,
			create_wr_t create_wr_fn)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct chcr_context *ctx = a_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct sk_buff *skb;
	struct chcr_dev *cdev;

	cdev = a_ctx(tfm)->dev;
	if (!cdev) {
		pr_err("%s : No crypto device.\n", __func__);
		return -ENXIO;
	}

	if (chcr_inc_wrcount(cdev)) {
		/* Detach state for CHCR means lldi or padap is freed.
		 * We cannot increment fallback here.
		 */
		return chcr_aead_fallback(req, reqctx->op);
	}

	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
				   reqctx->txqidx) &&
	    (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) {
		chcr_dec_wrcount(cdev);
		return -ENOSPC;
	}

	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
	    crypto_ipsec_check_assoclen(req->assoclen) != 0) {
		pr_err("RFC4106: Invalid value of assoclen %d\n",
		       req->assoclen);
		return -EINVAL;
	}

	/* Form a WR from req */
	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);

	if (IS_ERR_OR_NULL(skb)) {
		chcr_dec_wrcount(cdev);
		return PTR_ERR_OR_ZERO(skb);
	}

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}
static int chcr_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct chcr_context *ctx = a_ctx(tfm);
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	reqctx->verify = VERIFY_HW;
	reqctx->op = CHCR_ENCRYPT_OP;

	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
		return chcr_aead_op(req, 0, create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, 0, create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, 0, create_gcm_wr);
	}
}

static int chcr_aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_context *ctx = a_ctx(tfm);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	int size;
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	if (aeadctx->mayverify == VERIFY_SW) {
		size = crypto_aead_maxauthsize(tfm);
		reqctx->verify = VERIFY_SW;
	} else {
		size = 0;
		reqctx->verify = VERIFY_HW;
	}
	reqctx->op = CHCR_DECRYPT_OP;
	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
		return chcr_aead_op(req, size, create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, size, create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, size, create_gcm_wr);
	}
}
static struct chcr_alg_template driver_algs[] = {
	{
		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
		.is_registered = 0,
		.alg.skcipher = {
			.base.cra_name = "cbc(aes)",
			.base.cra_driver_name = "cbc-aes-chcr",
			.base.cra_blocksize = AES_BLOCK_SIZE,

			.init = chcr_init_tfm,
			.exit = chcr_exit_tfm,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = chcr_aes_cbc_setkey,
			.encrypt = chcr_aes_encrypt,
			.decrypt = chcr_aes_decrypt,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
		.is_registered = 0,
		.alg.skcipher = {
			.base.cra_name = "xts(aes)",
			.base.cra_driver_name = "xts-aes-chcr",
			.base.cra_blocksize = AES_BLOCK_SIZE,

			.init = chcr_init_tfm,
			.exit = chcr_exit_tfm,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = chcr_aes_xts_setkey,
			.encrypt = chcr_aes_encrypt,
			.decrypt = chcr_aes_decrypt,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
		.is_registered = 0,
		.alg.skcipher = {
			.base.cra_name = "ctr(aes)",
			.base.cra_driver_name = "ctr-aes-chcr",
			.base.cra_blocksize = 1,

			.init = chcr_init_tfm,
			.exit = chcr_exit_tfm,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = chcr_aes_ctr_setkey,
			.encrypt = chcr_aes_encrypt,
			.decrypt = chcr_aes_decrypt,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_SKCIPHER |
			CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
		.is_registered = 0,
		.alg.skcipher = {
			.base.cra_name = "rfc3686(ctr(aes))",
			.base.cra_driver_name = "rfc3686-ctr-aes-chcr",
			.base.cra_blocksize = 1,

			.init = chcr_rfc3686_init,
			.exit = chcr_exit_tfm,
			.min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.setkey = chcr_aes_rfc3686_setkey,
			.encrypt = chcr_aes_encrypt,
			.decrypt = chcr_aes_decrypt,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-chcr",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-chcr",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-chcr",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-chcr",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-chcr",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-chcr",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-chcr",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-chcr",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-chcr",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-chcr",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
	/* Add AEAD Algorithms */
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-chcr",
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_gcm_ctx),
			},
			.ivsize = GCM_AES_IV_SIZE,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_gcm_setkey,
			.setauthsize = chcr_gcm_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-chcr",
				.cra_priority = CHCR_AEAD_PRIORITY + 1,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_gcm_ctx),
			},
			.ivsize = GCM_RFC4106_IV_SIZE,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_gcm_setkey,
			.setauthsize = chcr_4106_4309_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "ccm(aes)",
				.cra_driver_name = "ccm-aes-chcr",
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_aead_ccm_setkey,
			.setauthsize = chcr_ccm_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "rfc4309(ccm(aes))",
				.cra_driver_name = "rfc4309-ccm-aes-chcr",
				.cra_priority = CHCR_AEAD_PRIORITY + 1,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx),
			},
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_aead_rfc4309_setkey,
			.setauthsize = chcr_4106_4309_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha1-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha256-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha224-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha384-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha512-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(digest_null,cbc(aes))",
				.cra_driver_name =
					"authenc-digest_null-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.setkey = chcr_aead_digest_null_setkey,
			.setauthsize = chcr_authenc_null_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-digest_null-rfc3686-ctr-aes-chcr",
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.setkey = chcr_aead_digest_null_setkey,
			.setauthsize = chcr_authenc_null_setauthsize,
		}
	},
};
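/*
 * Usage note (illustrative sketch, not driver code): the entries above are
 * plain crypto API algorithms, so a kernel user reaches this hardware by
 * allocating the generic name; the crypto core picks the highest-priority
 * registered implementation (CHCR_AEAD_PRIORITY here). The helper name is
 * hypothetical.
 */
static inline struct crypto_aead *example_alloc_gcm(void)
{
	/* Resolves to "gcm-aes-chcr" when it outranks other providers. */
	return crypto_alloc_aead("gcm(aes)", 0, 0);
}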
4370 * chcr_unregister_alg - Deregister crypto algorithms with
4373 static int chcr_unregister_alg(void)
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			if (driver_algs[i].is_registered && refcount_read(
			    &driver_algs[i].alg.skcipher.base.cra_refcnt)
			    == 1) {
				crypto_unregister_skcipher(
						&driver_algs[i].alg.skcipher);
				driver_algs[i].is_registered = 0;
			}
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			if (driver_algs[i].is_registered && refcount_read(
			    &driver_algs[i].alg.aead.base.cra_refcnt) == 1) {
				crypto_unregister_aead(
						&driver_algs[i].alg.aead);
				driver_algs[i].is_registered = 0;
			}
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			if (driver_algs[i].is_registered && refcount_read(
			    &driver_algs[i].alg.hash.halg.base.cra_refcnt)
			    == 1) {
				crypto_unregister_ahash(
						&driver_algs[i].alg.hash);
				driver_algs[i].is_registered = 0;
			}
			break;
		}
	}
	return 0;
}
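/*
 * The cra_refcnt == 1 tests above rely on the crypto core holding one
 * reference from registration time onward: a count of exactly one means
 * no tfms are currently instantiated, so the algorithm can be torn down
 * safely; anything higher means a user still holds it.
 */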
#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
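/*
 * These sizes feed the ahash registration below: SZ_AHASH_CTX and
 * SZ_AHASH_H_CTX become cra_ctxsize for plain and HMAC transforms
 * respectively, while SZ_AHASH_REQ_CTX doubles as halg.statesize so
 * export()/import() can snapshot the whole request context.
 */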
/*
 *	chcr_register_alg - Register crypto algorithms with kernel framework.
 */
static int chcr_register_alg(void)
{
	struct crypto_alg ai;
	struct ahash_alg *a_hash;
	int err = 0, i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (driver_algs[i].is_registered)
			continue;
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			driver_algs[i].alg.skcipher.base.cra_priority =
				CHCR_CRA_PRIORITY;
			driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE;
			driver_algs[i].alg.skcipher.base.cra_flags =
				CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_ALLOCATES_MEMORY |
				CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.skcipher.base.cra_ctxsize =
				sizeof(struct chcr_context) +
				sizeof(struct ablk_ctx);
			driver_algs[i].alg.skcipher.base.cra_alignmask = 0;

			err = crypto_register_skcipher(&driver_algs[i].alg.skcipher);
			name = driver_algs[i].alg.skcipher.base.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			driver_algs[i].alg.aead.base.cra_flags =
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
				CRYPTO_ALG_ALLOCATES_MEMORY;
			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
			err = crypto_register_aead(&driver_algs[i].alg.aead);
			name = driver_algs[i].alg.aead.base.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			a_hash = &driver_algs[i].alg.hash;
			a_hash->update = chcr_ahash_update;
			a_hash->final = chcr_ahash_final;
			a_hash->finup = chcr_ahash_finup;
			a_hash->digest = chcr_ahash_digest;
			a_hash->export = chcr_ahash_export;
			a_hash->import = chcr_ahash_import;
			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
			a_hash->halg.base.cra_module = THIS_MODULE;
			a_hash->halg.base.cra_flags =
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
			a_hash->halg.base.cra_alignmask = 0;
			a_hash->halg.base.cra_exit = NULL;

			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
				a_hash->init = chcr_hmac_init;
				a_hash->setkey = chcr_ahash_setkey;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
			} else {
				a_hash->init = chcr_sha_init;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
				a_hash->halg.base.cra_init = chcr_sha_cra_init;
			}
			err = crypto_register_ahash(&driver_algs[i].alg.hash);
			ai = driver_algs[i].alg.hash.halg.base;
			name = ai.cra_driver_name;
			break;
		}
		if (err) {
			pr_err("%s : Algorithm registration failed\n", name);
			goto register_err;
		} else {
			driver_algs[i].is_registered = 1;
		}
	}
	return 0;

register_err:
	chcr_unregister_alg();
	return err;
}
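/*
 * Usage sketch (illustrative only): a registered hash is reached the
 * same way through the ahash API; "sha256" below stands in for any
 * cra_name from the table.
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_free_ahash(tfm);
 */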
/*
 *	start_crypto - Register the crypto algorithms.
 *	This should be called once when the first device comes up. After
 *	this the kernel will start calling the driver APIs for crypto
 *	operations.
 */
int start_crypto(void)
{
	return chcr_register_alg();
}
/*
 *	stop_crypto - Deregister all the crypto algorithms with the kernel.
 *	This should be called once when the last device goes down. After
 *	this the kernel will not call the driver APIs for crypto
 *	operations.
 */
int stop_crypto(void)
{
	chcr_unregister_alg();
	return 0;
}
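/*
 * Call-site sketch (illustrative; the real hooks live in chcr_core.c and
 * the dev_count guard here is hypothetical): the core is expected to pair
 * these calls so registration happens exactly once across all devices.
 *
 *	if (atomic_inc_return(&dev_count) == 1)
 *		start_crypto();
 *
 *	if (atomic_dec_and_test(&dev_count))
 *		stop_crypto();
 */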