/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *      Manoj Malviya (manojmalviya@chelsio.com)
 *      Atul Gupta (atul.gupta@chelsio.com)
 *      Jitendra Lulla (jlulla@chelsio.com)
 *      Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *      Harsh Jain (harsh@chelsio.com)
 */

#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "t4fw_api.h"
#include "t4_msg.h"
#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

#define IV AES_BLOCK_SIZE

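/*
 * Space (in bytes) consumed by a source ULPTX SGL / destination DSGL
 * holding N entries, indexed by N.  These tables appear to encode the
 * hardware descriptor packing (entries grouped under fixed headers) and
 * are used below to budget scatter/gather space inside a work request.
 */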
static unsigned int sgl_ent_len[] = {
        0, 0, 16, 24, 40, 48, 64, 72, 88,
        96, 112, 120, 136, 144, 160, 168, 184,
        192, 208, 216, 232, 240, 256, 264, 280,
        288, 304, 312, 328, 336, 352, 360, 376
};

static unsigned int dsgl_ent_len[] = {
        0, 32, 32, 48, 48, 64, 64, 80, 80,
        112, 112, 128, 128, 144, 144, 160, 160,
        192, 192, 208, 208, 224, 224, 240, 240,
        272, 272, 288, 288, 304, 304, 320, 320
};

static u32 round_constant[11] = {
        0x01000000, 0x02000000, 0x04000000, 0x08000000,
        0x10000000, 0x20000000, 0x40000000, 0x80000000,
        0x1B000000, 0x36000000, 0x6C000000
};

static int chcr_handle_cipher_resp(struct skcipher_request *req,
                                   unsigned char *input, int err);

static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
        return ctx->crypto_ctx->aeadctx;
}

static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
        return ctx->crypto_ctx->ablkctx;
}

static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
        return ctx->crypto_ctx->hmacctx;
}

static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
{
        return gctx->ctx->gcm;
}

static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
{
        return gctx->ctx->authenc;
}

static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
        return container_of(ctx->dev, struct uld_ctx, dev);
}

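/* A work request can carry its payload inline ("immediate") only if the
 * whole skb fits within the maximum work-request length.
 */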
static inline int is_ofld_imm(const struct sk_buff *skb)
{
        return (skb->len <= SGE_MAX_WR_LEN);
}

static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
{
        memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
}

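/* Count the hardware SG entries needed to cover @reqlen bytes of @sg,
 * starting @skip bytes in, with each entry holding at most @entlen bytes.
 */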
static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
                         unsigned int entlen,
                         unsigned int skip)
{
        int nents = 0;
        unsigned int less;
        unsigned int skip_len = 0;

        while (sg && skip) {
                if (sg_dma_len(sg) <= skip) {
                        skip -= sg_dma_len(sg);
                        skip_len = 0;
                        sg = sg_next(sg);
                } else {
                        skip_len = skip;
                        skip = 0;
                }
        }

        while (sg && reqlen) {
                less = min(reqlen, sg_dma_len(sg) - skip_len);
                nents += DIV_ROUND_UP(less, entlen);
                reqlen -= less;
                skip_len = 0;
                sg = sg_next(sg);
        }
        return nents;
}

static inline int get_aead_subtype(struct crypto_aead *aead)
{
        struct aead_alg *alg = crypto_aead_alg(aead);
        struct chcr_alg_template *chcr_crypto_alg =
                container_of(alg, struct chcr_alg_template, alg.aead);

        return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

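/* Software verification of the AEAD authentication tag: compare the tag
 * computed by the hardware (returned right after the CPL_FW6_PLD) against
 * the expected tag, which for GCM modes sits in the CPL data and otherwise
 * at the tail of the source SGL, using a constant-time comparison.
 */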
void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
        u8 temp[SHA512_DIGEST_SIZE];
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        int authsize = crypto_aead_authsize(tfm);
        struct cpl_fw6_pld *fw6_pld;
        int cmp = 0;

        fw6_pld = (struct cpl_fw6_pld *)input;
        if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
            (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
                cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
        } else {
                sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
                                authsize, req->assoclen +
                                req->cryptlen - authsize);
                cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
        }
        if (cmp)
                *err = -EBADMSG;
        else
                *err = 0;
}

static int chcr_inc_wrcount(struct chcr_dev *dev)
{
        if (dev->state == CHCR_DETACH)
                return 1;
        atomic_inc(&dev->inflight);
        return 0;
}

static inline void chcr_dec_wrcount(struct chcr_dev *dev)
{
        atomic_dec(&dev->inflight);
}

static inline int chcr_handle_aead_resp(struct aead_request *req,
                                         unsigned char *input,
                                         int err)
{
        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct chcr_dev *dev = a_ctx(tfm)->dev;

        chcr_aead_common_exit(req);
        if (reqctx->verify == VERIFY_SW) {
                chcr_verify_tag(req, input, &err);
                reqctx->verify = VERIFY_HW;
        }
        chcr_dec_wrcount(dev);
        req->base.complete(&req->base, err);

        return err;
}

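/* Derive the decrypt key material the hardware expects: run the standard
 * AES key expansion and emit the last nk round-key words in reverse order,
 * so the engine can start from the final round key when decrypting.
 */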
static void get_aes_decrypt_key(unsigned char *dec_key,
                                const unsigned char *key,
                                unsigned int keylength)
{
        u32 temp;
        u32 w_ring[MAX_NK];
        int i, j, k;
        u8  nr, nk;

        switch (keylength) {
        case AES_KEYLENGTH_128BIT:
                nk = KEYLENGTH_4BYTES;
                nr = NUMBER_OF_ROUNDS_10;
                break;
        case AES_KEYLENGTH_192BIT:
                nk = KEYLENGTH_6BYTES;
                nr = NUMBER_OF_ROUNDS_12;
                break;
        case AES_KEYLENGTH_256BIT:
                nk = KEYLENGTH_8BYTES;
                nr = NUMBER_OF_ROUNDS_14;
                break;
        default:
                return;
        }
        for (i = 0; i < nk; i++)
                w_ring[i] = get_unaligned_be32(&key[i * 4]);

        i = 0;
        temp = w_ring[nk - 1];
        while (i + nk < (nr + 1) * 4) {
                if (!(i % nk)) {
                        /* RotWord(temp) */
                        temp = (temp << 8) | (temp >> 24);
                        temp = aes_ks_subword(temp);
                        temp ^= round_constant[i / nk];
                } else if (nk == 8 && (i % 4 == 0)) {
                        temp = aes_ks_subword(temp);
                }
                w_ring[i % nk] ^= temp;
                temp = w_ring[i % nk];
                i++;
        }
        i--;
        for (k = 0, j = i % nk; k < nk; k++) {
                put_unaligned_be32(w_ring[j], &dec_key[k * 4]);
                j--;
                if (j < 0)
                        j += nk;
        }
}

static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
        struct crypto_shash *base_hash = ERR_PTR(-EINVAL);

        switch (ds) {
        case SHA1_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha1", 0, 0);
                break;
        case SHA224_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha224", 0, 0);
                break;
        case SHA256_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha256", 0, 0);
                break;
        case SHA384_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha384", 0, 0);
                break;
        case SHA512_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha512", 0, 0);
                break;
        }

        return base_hash;
}

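/* Hash a single ipad/opad block with the software shash and export the
 * intermediate state; used to precompute the partial hashes that HMAC
 * offload programs into the hardware.
 */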
static int chcr_compute_partial_hash(struct shash_desc *desc,
                                     char *iopad, char *result_hash,
                                     int digest_size)
{
        struct sha1_state sha1_st;
        struct sha256_state sha256_st;
        struct sha512_state sha512_st;
        int error;

        if (digest_size == SHA1_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha1_st);
                memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
        } else if (digest_size == SHA224_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha256_st);
                memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
        } else if (digest_size == SHA256_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha256_st);
                memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
        } else if (digest_size == SHA384_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha512_st);
                memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
        } else if (digest_size == SHA512_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha512_st);
                memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
        } else {
                error = -EINVAL;
                pr_err("Unknown digest size %d\n", digest_size);
        }
        return error;
}

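/* Byte-swap an exported hash state into the big-endian word order the
 * hardware expects (64-bit words for SHA-512, 32-bit words otherwise).
 */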
static void chcr_change_order(char *buf, int ds)
{
        int i;

        if (ds == SHA512_DIGEST_SIZE) {
                for (i = 0; i < (ds / sizeof(u64)); i++)
                        *((__be64 *)buf + i) =
                                cpu_to_be64(*((u64 *)buf + i));
        } else {
                for (i = 0; i < (ds / sizeof(u32)); i++)
                        *((__be32 *)buf + i) =
                                cpu_to_be32(*((u32 *)buf + i));
        }
}

static inline int is_hmac(struct crypto_tfm *tfm)
{
        struct crypto_alg *alg = tfm->__crt_alg;
        struct chcr_alg_template *chcr_crypto_alg =
                container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
                             alg.hash);

        if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
                return 1;
        return 0;
}

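/* dsgl_walk_*: helpers for building the CPL_RX_PHYS_DSGL destination list,
 * packing up to eight {len, addr} pairs per phys_sge_pairs block and
 * filling in the CPL header once the walk ends.
 */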
static inline void dsgl_walk_init(struct dsgl_walk *walk,
                                   struct cpl_rx_phys_dsgl *dsgl)
{
        walk->dsgl = dsgl;
        walk->nents = 0;
        walk->to = (struct phys_sge_pairs *)(dsgl + 1);
}

static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
                                 int pci_chan_id)
{
        struct cpl_rx_phys_dsgl *phys_cpl;

        phys_cpl = walk->dsgl;

        phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
                                    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
        phys_cpl->pcirlxorder_to_noofsgentr =
                htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
                      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
                      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
                      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
                      CPL_RX_PHYS_DSGL_DCAID_V(0) |
                      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
        phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
        phys_cpl->rss_hdr_int.qid = htons(qid);
        phys_cpl->rss_hdr_int.hash_val = 0;
        phys_cpl->rss_hdr_int.channel = pci_chan_id;
}

static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
                                        size_t size,
                                        dma_addr_t addr)
{
        int j;

        if (!size)
                return;
        j = walk->nents;
        walk->to->len[j % 8] = htons(size);
        walk->to->addr[j % 8] = cpu_to_be64(addr);
        j++;
        if ((j % 8) == 0)
                walk->to++;
        walk->nents = j;
}

static void dsgl_walk_add_sg(struct dsgl_walk *walk,
                             struct scatterlist *sg,
                             unsigned int slen,
                             unsigned int skip)
{
        int skip_len = 0;
        unsigned int left_size = slen, len = 0;
        unsigned int j = walk->nents;
        int offset, ent_len;

        if (!slen)
                return;
        while (sg && skip) {
                if (sg_dma_len(sg) <= skip) {
                        skip -= sg_dma_len(sg);
                        skip_len = 0;
                        sg = sg_next(sg);
                } else {
                        skip_len = skip;
                        skip = 0;
                }
        }

        while (left_size && sg) {
                len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
                offset = 0;
                while (len) {
                        ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
                        walk->to->len[j % 8] = htons(ent_len);
                        walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
                                                      offset + skip_len);
                        offset += ent_len;
                        len -= ent_len;
                        j++;
                        if ((j % 8) == 0)
                                walk->to++;
                }
                walk->last_sg = sg;
                walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
                                          skip_len) + skip_len;
                left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
                skip_len = 0;
                sg = sg_next(sg);
        }
        walk->nents = j;
}

static inline void ulptx_walk_init(struct ulptx_walk *walk,
                                   struct ulptx_sgl *ulp)
{
        walk->sgl = ulp;
        walk->nents = 0;
        walk->pair_idx = 0;
        walk->pair = ulp->sge;
        walk->last_sg = NULL;
        walk->last_sg_len = 0;
}

static inline void ulptx_walk_end(struct ulptx_walk *walk)
{
        walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
                              ULPTX_NSGE_V(walk->nents));
}

static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
                                        size_t size,
                                        dma_addr_t addr)
{
        if (!size)
                return;

        if (walk->nents == 0) {
                walk->sgl->len0 = cpu_to_be32(size);
                walk->sgl->addr0 = cpu_to_be64(addr);
        } else {
                walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
                walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
                walk->pair_idx = !walk->pair_idx;
                if (!walk->pair_idx)
                        walk->pair++;
        }
        walk->nents++;
}

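/* Add @len bytes of @sg (starting @skip bytes in) to a ULPTX source SGL.
 * The first entry lands in the SGL header (len0/addr0); later entries are
 * packed two per pair, each capped at CHCR_SRC_SG_SIZE bytes.
 */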
static void ulptx_walk_add_sg(struct ulptx_walk *walk,
                              struct scatterlist *sg,
                              unsigned int len,
                              unsigned int skip)
{
        int small;
        int skip_len = 0;
        unsigned int sgmin;

        if (!len)
                return;
        while (sg && skip) {
                if (sg_dma_len(sg) <= skip) {
                        skip -= sg_dma_len(sg);
                        skip_len = 0;
                        sg = sg_next(sg);
                } else {
                        skip_len = skip;
                        skip = 0;
                }
        }
        WARN(!sg, "SG should not be null here\n");
        if (sg && (walk->nents == 0)) {
                small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
                sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
                walk->sgl->len0 = cpu_to_be32(sgmin);
                walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
                walk->nents++;
                len -= sgmin;
                walk->last_sg = sg;
                walk->last_sg_len = sgmin + skip_len;
                skip_len += sgmin;
                if (sg_dma_len(sg) == skip_len) {
                        sg = sg_next(sg);
                        skip_len = 0;
                }
        }

        while (sg && len) {
                small = min(sg_dma_len(sg) - skip_len, len);
                sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
                walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
                walk->pair->addr[walk->pair_idx] =
                        cpu_to_be64(sg_dma_address(sg) + skip_len);
                walk->pair_idx = !walk->pair_idx;
                walk->nents++;
                if (!walk->pair_idx)
                        walk->pair++;
                len -= sgmin;
                skip_len += sgmin;
                walk->last_sg = sg;
                walk->last_sg_len = skip_len;
                if (sg_dma_len(sg) == skip_len) {
                        sg = sg_next(sg);
                        skip_len = 0;
                }
        }
}

static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm)
{
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
        struct chcr_alg_template *chcr_crypto_alg =
                container_of(alg, struct chcr_alg_template, alg.skcipher);

        return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
        struct adapter *adap = netdev2adap(dev);
        struct sge_uld_txq_info *txq_info =
                adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
        struct sge_uld_txq *txq;
        int ret = 0;

        local_bh_disable();
        txq = &txq_info->uldtxq[idx];
        spin_lock(&txq->sendq.lock);
        if (txq->full)
                ret = -1;
        spin_unlock(&txq->sendq.lock);
        local_bh_enable();
        return ret;
}

static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
                               struct _key_ctx *key_ctx)
{
        if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
                memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
        } else {
                memcpy(key_ctx->key,
                       ablkctx->key + (ablkctx->enckey_len >> 1),
                       ablkctx->enckey_len >> 1);
                memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
                       ablkctx->rrkey, ablkctx->enckey_len >> 1);
        }
        return 0;
}

static int chcr_hash_ent_in_wr(struct scatterlist *src,
                               unsigned int minsg,
                               unsigned int space,
                               unsigned int srcskip)
{
        int srclen = 0;
        int srcsg = minsg;
        int soffset = 0, sless;

        if (sg_dma_len(src) == srcskip) {
                src = sg_next(src);
                srcskip = 0;
        }
        while (src && space > (sgl_ent_len[srcsg + 1])) {
                sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
                                                        CHCR_SRC_SG_SIZE);
                srclen += sless;
                soffset += sless;
                srcsg++;
                if (sg_dma_len(src) == (soffset + srcskip)) {
                        src = sg_next(src);
                        soffset = 0;
                        srcskip = 0;
                }
        }
        return srclen;
}

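/* Given @space bytes left in a work request, compute how many payload
 * bytes can be described by both the source SGL and the destination DSGL,
 * using the per-entry cost tables above; the caller may only transfer
 * min(srclen, dstlen) bytes in this WR.
 */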
static int chcr_sg_ent_in_wr(struct scatterlist *src,
                             struct scatterlist *dst,
                             unsigned int minsg,
                             unsigned int space,
                             unsigned int srcskip,
                             unsigned int dstskip)
{
        int srclen = 0, dstlen = 0;
        int srcsg = minsg, dstsg = minsg;
        int offset = 0, soffset = 0, less, sless = 0;

        if (sg_dma_len(src) == srcskip) {
                src = sg_next(src);
                srcskip = 0;
        }
        if (sg_dma_len(dst) == dstskip) {
                dst = sg_next(dst);
                dstskip = 0;
        }

        while (src && dst &&
               space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
                sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
                                CHCR_SRC_SG_SIZE);
                srclen += sless;
                srcsg++;
                offset = 0;
                while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
                       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
                        if (srclen <= dstlen)
                                break;
                        less = min_t(unsigned int, sg_dma_len(dst) - offset -
                                     dstskip, CHCR_DST_SG_SIZE);
                        dstlen += less;
                        offset += less;
                        if ((offset + dstskip) == sg_dma_len(dst)) {
                                dst = sg_next(dst);
                                offset = 0;
                        }
                        dstsg++;
                        dstskip = 0;
                }
                soffset += sless;
                if ((soffset + srcskip) == sg_dma_len(src)) {
                        src = sg_next(src);
                        srcskip = 0;
                        soffset = 0;
                }
        }
        return min(srclen, dstlen);
}

static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
                                u32 flags,
                                struct scatterlist *src,
                                struct scatterlist *dst,
                                unsigned int nbytes,
                                u8 *iv,
                                unsigned short op_type)
{
        int err;

        SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher);

        skcipher_request_set_sync_tfm(subreq, cipher);
        skcipher_request_set_callback(subreq, flags, NULL, NULL);
        skcipher_request_set_crypt(subreq, src, dst,
                                   nbytes, iv);

        err = op_type ? crypto_skcipher_decrypt(subreq) :
                crypto_skcipher_encrypt(subreq);
        skcipher_request_zero(subreq);

        return err;
}

static inline int get_qidxs(struct crypto_async_request *req,
                            unsigned int *txqidx, unsigned int *rxqidx)
{
        struct crypto_tfm *tfm = req->tfm;
        int ret = 0;

        switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_AEAD:
        {
                struct aead_request *aead_req =
                        container_of(req, struct aead_request, base);
                struct chcr_aead_reqctx *reqctx = aead_request_ctx(aead_req);
                *txqidx = reqctx->txqidx;
                *rxqidx = reqctx->rxqidx;
                break;
        }
        case CRYPTO_ALG_TYPE_SKCIPHER:
        {
                struct skcipher_request *sk_req =
                        container_of(req, struct skcipher_request, base);
                struct chcr_skcipher_req_ctx *reqctx =
                        skcipher_request_ctx(sk_req);
                *txqidx = reqctx->txqidx;
                *rxqidx = reqctx->rxqidx;
                break;
        }
        case CRYPTO_ALG_TYPE_AHASH:
        {
                struct ahash_request *ahash_req =
                        container_of(req, struct ahash_request, base);
                struct chcr_ahash_req_ctx *reqctx =
                        ahash_request_ctx(ahash_req);
                *txqidx = reqctx->txqidx;
                *rxqidx = reqctx->rxqidx;
                break;
        }
        default:
                ret = -EINVAL;
                /* should never get here */
                BUG();
                break;
        }
        return ret;
}

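/* Fill the fields common to every crypto work request: the
 * FW_CRYPTO_LOOKASIDE_WR header (sizes, cookie, response queue), the
 * ULPTX command, and the immediate-data sub-command covering the CPL,
 * key context and (for immediate requests) the payload.
 */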
static inline void create_wreq(struct chcr_context *ctx,
                               struct chcr_wr *chcr_req,
                               struct crypto_async_request *req,
                               unsigned int imm,
                               int hash_sz,
                               unsigned int len16,
                               unsigned int sc_len,
                               unsigned int lcb)
{
        struct uld_ctx *u_ctx = ULD_CTX(ctx);
        unsigned int tx_channel_id, rx_channel_id;
        unsigned int txqidx = 0, rxqidx = 0;
        unsigned int qid, fid;

        get_qidxs(req, &txqidx, &rxqidx);
        qid = u_ctx->lldi.rxq_ids[rxqidx];
        fid = u_ctx->lldi.rxq_ids[0];
        tx_channel_id = txqidx / ctx->txq_perchan;
        rx_channel_id = rxqidx / ctx->rxq_perchan;

        chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
        chcr_req->wreq.pld_size_hash_size =
                htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
        chcr_req->wreq.len16_pkd =
                htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
        chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
        chcr_req->wreq.rx_chid_to_rx_q_id = FILL_WR_RX_Q_ID(rx_channel_id, qid,
                                                            !!lcb, txqidx);

        chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(tx_channel_id, fid);
        chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
                                ((sizeof(chcr_req->wreq)) >> 4)));
        chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
        chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
                                           sizeof(chcr_req->key_ctx) + sc_len);
}

/**
 *      create_cipher_wr - form the WR for cipher operations
 *      @wrparam: cipher work-request parameters: the skcipher request,
 *                the ingress qid where the response of this WR should be
 *                received, and the number of bytes to process in this WR.
 */
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
        struct chcr_context *ctx = c_ctx(tfm);
        struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
        struct sk_buff *skb = NULL;
        struct chcr_wr *chcr_req;
        struct cpl_rx_phys_dsgl *phys_cpl;
        struct ulptx_sgl *ulptx;
        struct chcr_skcipher_req_ctx *reqctx =
                skcipher_request_ctx(wrparam->req);
        unsigned int temp = 0, transhdr_len, dst_size;
        int error;
        int nents;
        unsigned int kctx_len;
        gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                        GFP_KERNEL : GFP_ATOMIC;
        struct adapter *adap = padap(ctx->dev);
        unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;

        nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
                              reqctx->dst_ofst);
        dst_size = get_space_for_phys_dsgl(nents);
        kctx_len = roundup(ablkctx->enckey_len, 16);
        transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
        nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
                              CHCR_SRC_SG_SIZE, reqctx->src_ofst);
        temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
                                     (sgl_len(nents) * 8);
        transhdr_len += temp;
        transhdr_len = roundup(transhdr_len, 16);
        skb = alloc_skb(SGE_MAX_WR_LEN, flags);
        if (!skb) {
                error = -ENOMEM;
                goto err;
        }
        chcr_req = __skb_put_zero(skb, transhdr_len);
        chcr_req->sec_cpl.op_ivinsrtofst =
                        FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);

        chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
        chcr_req->sec_cpl.aadstart_cipherstop_hi =
                        FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);

        chcr_req->sec_cpl.cipherstop_lo_authinsert =
                        FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
        chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
                                                         ablkctx->ciph_mode,
                                                         0, 0, IV >> 1);
        chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
                                                          0, 1, dst_size);

        chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
        if ((reqctx->op == CHCR_DECRYPT_OP) &&
            (!(get_cryptoalg_subtype(tfm) ==
               CRYPTO_ALG_SUB_TYPE_CTR)) &&
            (!(get_cryptoalg_subtype(tfm) ==
               CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
                generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
        } else {
                if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
                    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
                        memcpy(chcr_req->key_ctx.key, ablkctx->key,
                               ablkctx->enckey_len);
                } else {
                        memcpy(chcr_req->key_ctx.key, ablkctx->key +
                               (ablkctx->enckey_len >> 1),
                               ablkctx->enckey_len >> 1);
                        memcpy(chcr_req->key_ctx.key +
                               (ablkctx->enckey_len >> 1),
                               ablkctx->key,
                               ablkctx->enckey_len >> 1);
                }
        }
        phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
        ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
        chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
        chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);

        atomic_inc(&adap->chcr_stats.cipher_rqst);
        temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
                + (reqctx->imm ? (wrparam->bytes) : 0);
        create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
                    transhdr_len, temp,
                    ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
        reqctx->skb = skb;

        if (reqctx->op && (ablkctx->ciph_mode ==
                           CHCR_SCMD_CIPHER_MODE_AES_CBC))
                sg_pcopy_to_buffer(wrparam->req->src,
                        sg_nents(wrparam->req->src), wrparam->req->iv, 16,
                        reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);

        return skb;
err:
        return ERR_PTR(error);
}

static inline int chcr_keyctx_ck_size(unsigned int keylen)
{
        int ck_size = 0;

        if (keylen == AES_KEYSIZE_128)
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
        else if (keylen == AES_KEYSIZE_192)
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
        else if (keylen == AES_KEYSIZE_256)
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
        else
                ck_size = 0;

        return ck_size;
}

static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
                                       const u8 *key,
                                       unsigned int keylen)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));

        crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
                                CRYPTO_TFM_REQ_MASK);
        crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
                                cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
        return crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
}

static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
                               const u8 *key,
                               unsigned int keylen)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned int ck_size, context_size;
        u16 alignment = 0;
        int err;

        err = chcr_cipher_fallback_setkey(cipher, key, keylen);
        if (err)
                goto badkey_err;

        ck_size = chcr_keyctx_ck_size(keylen);
        alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
        memcpy(ablkctx->key, key, keylen);
        ablkctx->enckey_len = keylen;
        get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
                        keylen + alignment) >> 4;

        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
                                                0, 0, context_size);
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
        return 0;
badkey_err:
        ablkctx->enckey_len = 0;

        return err;
}

static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
                               const u8 *key,
                               unsigned int keylen)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned int ck_size, context_size;
        u16 alignment = 0;
        int err;

        err = chcr_cipher_fallback_setkey(cipher, key, keylen);
        if (err)
                goto badkey_err;
        ck_size = chcr_keyctx_ck_size(keylen);
        alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
        memcpy(ablkctx->key, key, keylen);
        ablkctx->enckey_len = keylen;
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
                        keylen + alignment) >> 4;

        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
                                                0, 0, context_size);
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

        return 0;
badkey_err:
        ablkctx->enckey_len = 0;

        return err;
}

static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
                                   const u8 *key,
                                   unsigned int keylen)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned int ck_size, context_size;
        u16 alignment = 0;
        int err;

        if (keylen < CTR_RFC3686_NONCE_SIZE)
                return -EINVAL;
        memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
               CTR_RFC3686_NONCE_SIZE);

        keylen -= CTR_RFC3686_NONCE_SIZE;
        err = chcr_cipher_fallback_setkey(cipher, key, keylen);
        if (err)
                goto badkey_err;

        ck_size = chcr_keyctx_ck_size(keylen);
        alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
        memcpy(ablkctx->key, key, keylen);
        ablkctx->enckey_len = keylen;
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
                        keylen + alignment) >> 4;

        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
                                                0, 0, context_size);
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

        return 0;
badkey_err:
        ablkctx->enckey_len = 0;

        return err;
}
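
/* Add @add to the 128-bit big-endian counter in @srciv, propagating the
 * carry across the four 32-bit words, and store the result in @dstiv.
 */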
static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
{
        unsigned int size = AES_BLOCK_SIZE;
        __be32 *b = (__be32 *)(dstiv + size);
        u32 c, prev;

        memcpy(dstiv, srciv, AES_BLOCK_SIZE);
        for (; size >= 4; size -= 4) {
                prev = be32_to_cpu(*--b);
                c = prev + add;
                *b = cpu_to_be32(c);
                if (prev < c)
                        break;
                add = 1;
        }
}

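/* Clamp @bytes so the low 32-bit word of the CTR counter in @iv cannot
 * wrap within one work request; any remainder is issued in a follow-up
 * request once the IV has been updated.
 */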
static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
{
        __be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
        u64 c;
        u32 temp = be32_to_cpu(*--b);

        temp = ~temp;
        /* number of blocks that can be processed without overflow */
        c = (u64)temp + 1;
        if ((bytes / AES_BLOCK_SIZE) >= c)
                bytes = c * AES_BLOCK_SIZE;
        return bytes;
}

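/* Recompute the XTS tweak for the next chunk of a multi-WR request:
 * encrypt the IV with the tweak half of the key, then multiply by x in
 * GF(2^128) once per block already processed.  For non-final chunks the
 * result is decrypted again, presumably because the hardware encrypts
 * whatever tweak value it is handed.
 */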
static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
                             u32 isfinal)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
        struct crypto_aes_ctx aes;
        int ret, i;
        u8 *key;
        unsigned int keylen;
        int round = reqctx->last_req_len / AES_BLOCK_SIZE;
        int round8 = round / 8;

        memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);

        keylen = ablkctx->enckey_len / 2;
        key = ablkctx->key + keylen;
        /* For a 192 bit key remove the padded zeroes which were
         * added in chcr_xts_setkey
         */
        if (KEY_CONTEXT_CK_SIZE_G(ntohl(ablkctx->key_ctx_hdr))
                        == CHCR_KEYCTX_CIPHER_KEY_SIZE_192)
                ret = aes_expandkey(&aes, key, keylen - 8);
        else
                ret = aes_expandkey(&aes, key, keylen);
        if (ret)
                return ret;
        aes_encrypt(&aes, iv, iv);
        for (i = 0; i < round8; i++)
                gf128mul_x8_ble((le128 *)iv, (le128 *)iv);

        for (i = 0; i < (round % 8); i++)
                gf128mul_x_ble((le128 *)iv, (le128 *)iv);

        if (!isfinal)
                aes_decrypt(&aes, iv, iv);

        memzero_explicit(&aes, sizeof(aes));
        return 0;
}

static int chcr_update_cipher_iv(struct skcipher_request *req,
                                   struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
        int subtype = get_cryptoalg_subtype(tfm);
        int ret = 0;

        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
                ctr_add_iv(iv, req->iv, (reqctx->processed /
                           AES_BLOCK_SIZE));
        else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
                *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
                        CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
                                                AES_BLOCK_SIZE) + 1);
        else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
                ret = chcr_update_tweak(req, iv, 0);
        else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
                if (reqctx->op)
                        /* Updated before sending last WR */
                        memcpy(iv, req->iv, AES_BLOCK_SIZE);
                else
                        memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
        }

        return ret;
}

/* We need a separate function for the final IV because in RFC3686 the
 * initial counter starts from 1 and the IV buffer is only 8 bytes, which
 * remains constant across subsequent update requests.
 */
static int chcr_final_cipher_iv(struct skcipher_request *req,
                                   struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
        int subtype = get_cryptoalg_subtype(tfm);
        int ret = 0;

        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
                ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
                                                       AES_BLOCK_SIZE));
        else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) {
                if (!reqctx->partial_req)
                        memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
                else
                        ret = chcr_update_tweak(req, iv, 1);
        } else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
                /* Already updated for decrypt */
                if (!reqctx->op)
                        memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
        }
        return ret;
}

static int chcr_handle_cipher_resp(struct skcipher_request *req,
                                   unsigned char *input, int err)
{
        struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
        struct chcr_dev *dev = c_ctx(tfm)->dev;
        struct chcr_context *ctx = c_ctx(tfm);
        struct adapter *adap = padap(ctx->dev);
        struct cipher_wr_param wrparam;
        struct sk_buff *skb;
        int bytes;

        if (err)
                goto unmap;
        if (req->cryptlen == reqctx->processed) {
                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                      req);
                err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
                goto complete;
        }

        if (!reqctx->imm) {
                bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
                                          CIP_SPACE_LEFT(ablkctx->enckey_len),
                                          reqctx->src_ofst, reqctx->dst_ofst);
                if ((bytes + reqctx->processed) >= req->cryptlen)
                        bytes = req->cryptlen - reqctx->processed;
                else
                        bytes = rounddown(bytes, 16);
        } else {
                /* CTR mode counter overflow */
                bytes = req->cryptlen - reqctx->processed;
        }
        err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
        if (err)
                goto unmap;

        if (unlikely(bytes == 0)) {
                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                      req);
                memcpy(req->iv, reqctx->init_iv, IV);
                atomic_inc(&adap->chcr_stats.fallback);
                err = chcr_cipher_fallback(ablkctx->sw_cipher,
                                     req->base.flags,
                                     req->src,
                                     req->dst,
                                     req->cryptlen,
                                     req->iv,
                                     reqctx->op);
                goto complete;
        }

        if (get_cryptoalg_subtype(tfm) ==
            CRYPTO_ALG_SUB_TYPE_CTR)
                bytes = adjust_ctr_overflow(reqctx->iv, bytes);
        wrparam.qid = u_ctx->lldi.rxq_ids[reqctx->rxqidx];
        wrparam.req = req;
        wrparam.bytes = bytes;
        skb = create_cipher_wr(&wrparam);
        if (IS_ERR(skb)) {
                pr_err("%s: Failed to form WR. No memory\n", __func__);
                err = PTR_ERR(skb);
                goto unmap;
        }
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
        chcr_send_wr(skb);
        reqctx->last_req_len = bytes;
        reqctx->processed += bytes;
        if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_CBC &&
            req->base.flags == CRYPTO_TFM_REQ_MAY_SLEEP) {
                complete(&ctx->cbc_aes_aio_done);
        }
        return 0;
unmap:
        chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
        if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_CBC &&
            req->base.flags == CRYPTO_TFM_REQ_MAY_SLEEP) {
                complete(&ctx->cbc_aes_aio_done);
        }
        chcr_dec_wrcount(dev);
        req->base.complete(&req->base, err);
        return err;
}

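/* Validate the request, map it for DMA, decide between immediate and SGL
 * transfer, set up the per-mode IV, and build the first cipher work
 * request.  Requests the hardware cannot handle are punted to the
 * software fallback cipher.
 */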
static int process_cipher(struct skcipher_request *req,
                          unsigned short qid,
                          struct sk_buff **skb,
                          unsigned short op_type)
{
        struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        unsigned int ivsize = crypto_skcipher_ivsize(tfm);
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct adapter *adap = padap(c_ctx(tfm)->dev);
        struct cipher_wr_param wrparam;
        int bytes, err = -EINVAL;
        int subtype;

        reqctx->processed = 0;
        reqctx->partial_req = 0;
        if (!req->iv)
                goto error;
        subtype = get_cryptoalg_subtype(tfm);
        if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
            (req->cryptlen == 0) ||
            (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
                if (req->cryptlen == 0 && subtype != CRYPTO_ALG_SUB_TYPE_XTS)
                        goto fallback;
                else if (req->cryptlen % crypto_skcipher_blocksize(tfm) &&
                         subtype == CRYPTO_ALG_SUB_TYPE_XTS)
                        goto fallback;
                pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
                       ablkctx->enckey_len, req->cryptlen, ivsize);
                goto error;
        }

        err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
        if (err)
                goto error;
        if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
                                            AES_MIN_KEY_SIZE +
                                            sizeof(struct cpl_rx_phys_dsgl) +
                                        /* Min dsgl size */
                                            32))) {
                /* Can be sent as Imm */
                unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;

                dnents = sg_nents_xlen(req->dst, req->cryptlen,
                                       CHCR_DST_SG_SIZE, 0);
                phys_dsgl = get_space_for_phys_dsgl(dnents);
                kctx_len = roundup(ablkctx->enckey_len, 16);
                transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
                reqctx->imm = (transhdr_len + IV + req->cryptlen) <=
                        SGE_MAX_WR_LEN;
                bytes = IV + req->cryptlen;
        } else {
                reqctx->imm = 0;
        }

        if (!reqctx->imm) {
                bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
                                          CIP_SPACE_LEFT(ablkctx->enckey_len),
                                          0, 0);
                if ((bytes + reqctx->processed) >= req->cryptlen)
                        bytes = req->cryptlen - reqctx->processed;
                else
                        bytes = rounddown(bytes, 16);
        } else {
                bytes = req->cryptlen;
        }
        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) {
                bytes = adjust_ctr_overflow(req->iv, bytes);
        }
        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
                memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
                memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
                                CTR_RFC3686_IV_SIZE);

                /* initialize counter portion of counter block */
                *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
                        CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
                memcpy(reqctx->init_iv, reqctx->iv, IV);
        } else {
                memcpy(reqctx->iv, req->iv, IV);
                memcpy(reqctx->init_iv, req->iv, IV);
        }
        if (unlikely(bytes == 0)) {
                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                      req);
fallback:       atomic_inc(&adap->chcr_stats.fallback);
                err = chcr_cipher_fallback(ablkctx->sw_cipher,
                                           req->base.flags,
                                           req->src,
                                           req->dst,
                                           req->cryptlen,
                                           subtype ==
                                           CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
                                           reqctx->iv : req->iv,
                                           op_type);
                goto error;
        }
        reqctx->op = op_type;
        reqctx->srcsg = req->src;
        reqctx->dstsg = req->dst;
        reqctx->src_ofst = 0;
        reqctx->dst_ofst = 0;
        wrparam.qid = qid;
        wrparam.req = req;
        wrparam.bytes = bytes;
        *skb = create_cipher_wr(&wrparam);
        if (IS_ERR(*skb)) {
                err = PTR_ERR(*skb);
                goto unmap;
        }
        reqctx->processed = bytes;
        reqctx->last_req_len = bytes;
        reqctx->partial_req = !!(req->cryptlen - reqctx->processed);

        return 0;
unmap:
        chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
error:
        return err;
}

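/* Submit an encrypt work request; returns -EINPROGRESS on successful
 * submission, with completion delivered via chcr_handle_cipher_resp().
 * For sleepable CBC requests we additionally wait on cbc_aes_aio_done
 * before returning.
 */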
1379 static int chcr_aes_encrypt(struct skcipher_request *req)
1380 {
1381         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1382         struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1383         struct chcr_dev *dev = c_ctx(tfm)->dev;
1384         struct sk_buff *skb = NULL;
1385         int err;
1386         struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1387         struct chcr_context *ctx = c_ctx(tfm);
1388         unsigned int cpu;
1389
1390         cpu = get_cpu();
1391         reqctx->txqidx = cpu % ctx->ntxq;
1392         reqctx->rxqidx = cpu % ctx->nrxq;
1393         put_cpu();
1394
1395         err = chcr_inc_wrcount(dev);
1396         if (err)
1397                 return -ENXIO;
1398         if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1399                                                 reqctx->txqidx) &&
1400                 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1401                         err = -ENOSPC;
1402                         goto error;
1403         }
1404
1405         err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
1406                              &skb, CHCR_ENCRYPT_OP);
1407         if (err || !skb)
1408                 return err;
1409         skb->dev = u_ctx->lldi.ports[0];
1410         set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1411         chcr_send_wr(skb);
1412         if (get_cryptoalg_subtype(tfm) ==
1413                 CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
1414                         CRYPTO_TFM_REQ_MAY_SLEEP) {
1415                         reqctx->partial_req = 1;
1416                         wait_for_completion(&ctx->cbc_aes_aio_done);
1417         }
1418         return -EINPROGRESS;
1419 error:
1420         chcr_dec_wrcount(dev);
1421         return err;
1422 }
1423
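/*
 * chcr_aes_decrypt - decrypt-side counterpart of chcr_aes_encrypt:
 * pick per-CPU queues, build and post the work request, and return
 * -EINPROGRESS on successful submission.
 */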
1424 static int chcr_aes_decrypt(struct skcipher_request *req)
1425 {
1426         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1427         struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1428         struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1429         struct chcr_dev *dev = c_ctx(tfm)->dev;
1430         struct sk_buff *skb = NULL;
1431         int err;
1432         struct chcr_context *ctx = c_ctx(tfm);
1433         unsigned int cpu;
1434
1435         cpu = get_cpu();
1436         reqctx->txqidx = cpu % ctx->ntxq;
1437         reqctx->rxqidx = cpu % ctx->nrxq;
1438         put_cpu();
1439
1440         err = chcr_inc_wrcount(dev);
1441         if (err)
1442                 return -ENXIO;
1443
1444         if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1445                                                 reqctx->txqidx) &&
1446                 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))))
1447                         return -ENOSPC;
1448         err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
1449                              &skb, CHCR_DECRYPT_OP);
1450         if (err || !skb)
1451                 return err;
1452         skb->dev = u_ctx->lldi.ports[0];
1453         set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1454         chcr_send_wr(skb);
1455         return -EINPROGRESS;
1456 }
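
/*
 * chcr_device_init - lazily bind this tfm context to a chcr device and
 * cache the queue geometry (total and per-channel rx/tx queue counts)
 * used to spread requests across channels.
 */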
1457 static int chcr_device_init(struct chcr_context *ctx)
1458 {
1459         struct uld_ctx *u_ctx = NULL;
1460         int txq_perchan, ntxq;
1461         int err = 0, rxq_perchan;
1462
1463         if (!ctx->dev) {
1464                 u_ctx = assign_chcr_device();
1465                 if (!u_ctx) {
1466                         err = -ENXIO;
1467                         pr_err("chcr device assignment failed\n");
1468                         goto out;
1469                 }
1470                 ctx->dev = &u_ctx->dev;
1471                 ntxq = u_ctx->lldi.ntxq;
1472                 rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
1473                 txq_perchan = ntxq / u_ctx->lldi.nchan;
1474                 ctx->ntxq = ntxq;
1475                 ctx->nrxq = u_ctx->lldi.nrxq;
1476                 ctx->rxq_perchan = rxq_perchan;
1477                 ctx->txq_perchan = txq_perchan;
1478         }
1479 out:
1480         return err;
1481 }
1482
1483 static int chcr_init_tfm(struct crypto_skcipher *tfm)
1484 {
1485         struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1486         struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1487         struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1488
1489         ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->base.cra_name, 0,
1490                                 CRYPTO_ALG_NEED_FALLBACK);
1491         if (IS_ERR(ablkctx->sw_cipher)) {
1492                 pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
1493                 return PTR_ERR(ablkctx->sw_cipher);
1494         }
1495         init_completion(&ctx->cbc_aes_aio_done);
1496         crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));
1497
1498         return chcr_device_init(ctx);
1499 }
1500
1501 static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
1502 {
1503         struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1504         struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1505         struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1506
1507         /* RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
1508          * cannot be used as the fallback in chcr_handle_cipher_response.
1509          */
1510         ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
1511                                 CRYPTO_ALG_NEED_FALLBACK);
1512         if (IS_ERR(ablkctx->sw_cipher)) {
1513                 pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
1514                 return PTR_ERR(ablkctx->sw_cipher);
1515         }
1516         crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));
1517         return chcr_device_init(ctx);
1518 }
1519
1521 static void chcr_exit_tfm(struct crypto_skcipher *tfm)
1522 {
1523         struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1524         struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1525
1526         crypto_free_sync_skcipher(ablkctx->sw_cipher);
1527 }
1528
1529 static int get_alg_config(struct algo_param *params,
1530                           unsigned int auth_size)
1531 {
1532         switch (auth_size) {
1533         case SHA1_DIGEST_SIZE:
1534                 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
1535                 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
1536                 params->result_size = SHA1_DIGEST_SIZE;
1537                 break;
1538         case SHA224_DIGEST_SIZE:
1539                 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1540                 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
1541                 params->result_size = SHA256_DIGEST_SIZE;
1542                 break;
1543         case SHA256_DIGEST_SIZE:
1544                 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1545                 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
1546                 params->result_size = SHA256_DIGEST_SIZE;
1547                 break;
1548         case SHA384_DIGEST_SIZE:
1549                 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1550                 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
1551                 params->result_size = SHA512_DIGEST_SIZE;
1552                 break;
1553         case SHA512_DIGEST_SIZE:
1554                 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1555                 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
1556                 params->result_size = SHA512_DIGEST_SIZE;
1557                 break;
1558         default:
1559                 pr_err("ERROR, unsupported digest size\n");
1560                 return -EINVAL;
1561         }
1562         return 0;
1563 }
1564
1565 static inline void chcr_free_shash(struct crypto_shash *base_hash)
1566 {
1567         crypto_free_shash(base_hash);
1568 }
1569
1570 /**
1571  *      create_hash_wr - Create a hash work request
1572  *      @req: ahash request
1573  */
1574 static struct sk_buff *create_hash_wr(struct ahash_request *req,
1575                                       struct hash_wr_param *param)
1576 {
1577         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1578         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1579         struct chcr_context *ctx = h_ctx(tfm);
1580         struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1581         struct sk_buff *skb = NULL;
1582         struct uld_ctx *u_ctx = ULD_CTX(ctx);
1583         struct chcr_wr *chcr_req;
1584         struct ulptx_sgl *ulptx;
1585         unsigned int nents = 0, transhdr_len;
1586         unsigned int temp = 0;
1587         gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1588                 GFP_ATOMIC;
1589         struct adapter *adap = padap(h_ctx(tfm)->dev);
1590         int error = 0;
1591         unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;
1592
1593         transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
1594         req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
1595                                 param->sg_len) <= SGE_MAX_WR_LEN;
1596         nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
1597                       CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
1598         nents += param->bfr_len ? 1 : 0;
1599         transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
1600                                 param->sg_len, 16) : (sgl_len(nents) * 8);
1601         transhdr_len = roundup(transhdr_len, 16);
1602
1603         skb = alloc_skb(transhdr_len, flags);
1604         if (!skb)
1605                 return ERR_PTR(-ENOMEM);
1606         chcr_req = __skb_put_zero(skb, transhdr_len);
1607
1608         chcr_req->sec_cpl.op_ivinsrtofst =
1609                 FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 0);
1610
1611         chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
1612
1613         chcr_req->sec_cpl.aadstart_cipherstop_hi =
1614                 FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
1615         chcr_req->sec_cpl.cipherstop_lo_authinsert =
1616                 FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
1617         chcr_req->sec_cpl.seqno_numivs =
1618                 FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
1619                                          param->opad_needed, 0);
1620
1621         chcr_req->sec_cpl.ivgen_hdrlen =
1622                 FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
1623
1624         memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
1625                param->alg_prm.result_size);
1626
1627         if (param->opad_needed)
1628                 memcpy(chcr_req->key_ctx.key +
1629                        ((param->alg_prm.result_size <= 32) ? 32 :
1630                         CHCR_HASH_MAX_DIGEST_SIZE),
1631                        hmacctx->opad, param->alg_prm.result_size);
1632
1633         chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
1634                                             param->alg_prm.mk_size, 0,
1635                                             param->opad_needed,
1636                                             ((param->kctx_len +
1637                                              sizeof(chcr_req->key_ctx)) >> 4));
1638         chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
1639         ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
1640                                      DUMMY_BYTES);
1641         if (param->bfr_len != 0) {
1642                 req_ctx->hctx_wr.dma_addr =
1643                         dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
1644                                        param->bfr_len, DMA_TO_DEVICE);
1645                 if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
1646                                        req_ctx->hctx_wr.dma_addr)) {
1647                         error = -ENOMEM;
1648                         goto err;
1649                 }
1650                 req_ctx->hctx_wr.dma_len = param->bfr_len;
1651         } else {
1652                 req_ctx->hctx_wr.dma_addr = 0;
1653         }
1654         chcr_add_hash_src_ent(req, ulptx, param);
1655         /* Request up to the max WR size */
1656         temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
1657                                 (param->sg_len + param->bfr_len) : 0);
1658         atomic_inc(&adap->chcr_stats.digest_rqst);
1659         create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
1660                     param->hash_size, transhdr_len,
1661                     temp, 0);
1662         req_ctx->hctx_wr.skb = skb;
1663         return skb;
1664 err:
1665         kfree_skb(skb);
1666         return ERR_PTR(error);
1667 }
1668
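/*
 * chcr_ahash_update - feed more data into a partial hash. Input shorter
 * than a hash block is only buffered in reqbfr; otherwise whole blocks
 * are pushed to the hardware and the sub-block remainder is copied into
 * the swapped-in buffer for the next call.
 */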
1669 static int chcr_ahash_update(struct ahash_request *req)
1670 {
1671         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1672         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1673         struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1674         struct chcr_context *ctx = h_ctx(rtfm);
1675         struct chcr_dev *dev = h_ctx(rtfm)->dev;
1676         struct sk_buff *skb;
1677         u8 remainder = 0, bs;
1678         unsigned int nbytes = req->nbytes;
1679         struct hash_wr_param params;
1680         int error;
1681         unsigned int cpu;
1682
1683         cpu = get_cpu();
1684         req_ctx->txqidx = cpu % ctx->ntxq;
1685         req_ctx->rxqidx = cpu % ctx->nrxq;
1686         put_cpu();
1687
1688         bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1689
1690         if (nbytes + req_ctx->reqlen >= bs) {
1691                 remainder = (nbytes + req_ctx->reqlen) % bs;
1692                 nbytes = nbytes + req_ctx->reqlen - remainder;
1693         } else {
1694                 sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
1695                                    + req_ctx->reqlen, nbytes, 0);
1696                 req_ctx->reqlen += nbytes;
1697                 return 0;
1698         }
1699         error = chcr_inc_wrcount(dev);
1700         if (error)
1701                 return -ENXIO;
1702         /* Detach state for CHCR means lldi or padap has been freed. Holding
1703          * an inflight count on the dev guarantees that lldi and padap remain valid.
1704          */
1705         if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1706                                                 req_ctx->txqidx) &&
1707                 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1708                         error = -ENOSPC;
1709                         goto err;
1710         }
1711
1712         chcr_init_hctx_per_wr(req_ctx);
1713         error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1714         if (error) {
1715                 error = -ENOMEM;
1716                 goto err;
1717         }
1718         get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1719         params.kctx_len = roundup(params.alg_prm.result_size, 16);
1720         params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1721                                      HASH_SPACE_LEFT(params.kctx_len), 0);
1722         if (params.sg_len > req->nbytes)
1723                 params.sg_len = req->nbytes;
1724         params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
1725                         req_ctx->reqlen;
1726         params.opad_needed = 0;
1727         params.more = 1;
1728         params.last = 0;
1729         params.bfr_len = req_ctx->reqlen;
1730         params.scmd1 = 0;
1731         req_ctx->hctx_wr.srcsg = req->src;
1732
1733         params.hash_size = params.alg_prm.result_size;
1734         req_ctx->data_len += params.sg_len + params.bfr_len;
1735         skb = create_hash_wr(req, &params);
1736         if (IS_ERR(skb)) {
1737                 error = PTR_ERR(skb);
1738                 goto unmap;
1739         }
1740
1741         req_ctx->hctx_wr.processed += params.sg_len;
1742         if (remainder) {
1743                 /* Swap buffers */
1744                 swap(req_ctx->reqbfr, req_ctx->skbfr);
1745                 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
1746                                    req_ctx->reqbfr, remainder, req->nbytes -
1747                                    remainder);
1748         }
1749         req_ctx->reqlen = remainder;
1750         skb->dev = u_ctx->lldi.ports[0];
1751         set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1752         chcr_send_wr(skb);
1753         return -EINPROGRESS;
1754 unmap:
1755         chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1756 err:
1757         chcr_dec_wrcount(dev);
1758         return error;
1759 }
1760
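/*
 * create_last_hash_block - build an MD-style final padding block in
 * software: 0x80 followed by zeros, with the 64-bit big-endian bit
 * count in the last 8 bytes. Illustrative example: for a 64-byte block
 * size and scmd1 == 3 bytes processed, offset 56 receives
 * cpu_to_be64(24), i.e. the message length in bits.
 */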
1761 static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
1762 {
1763         memset(bfr_ptr, 0, bs);
1764         *bfr_ptr = 0x80;
1765         if (bs == 64)
1766                 *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
1767         else
1768                 *(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
1769 }
1770
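/*
 * chcr_ahash_final - flush whatever is buffered as the last work
 * request. When nothing is buffered, a software-built padding block is
 * sent in its place so the hardware can close out the digest.
 */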
1771 static int chcr_ahash_final(struct ahash_request *req)
1772 {
1773         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1774         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1775         struct chcr_dev *dev = h_ctx(rtfm)->dev;
1776         struct hash_wr_param params;
1777         struct sk_buff *skb;
1778         struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1779         struct chcr_context *ctx = h_ctx(rtfm);
1780         u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1781         int error;
1782         unsigned int cpu;
1783
1784         cpu = get_cpu();
1785         req_ctx->txqidx = cpu % ctx->ntxq;
1786         req_ctx->rxqidx = cpu % ctx->nrxq;
1787         put_cpu();
1788
1789         error = chcr_inc_wrcount(dev);
1790         if (error)
1791                 return -ENXIO;
1792
1793         chcr_init_hctx_per_wr(req_ctx);
1794         if (is_hmac(crypto_ahash_tfm(rtfm)))
1795                 params.opad_needed = 1;
1796         else
1797                 params.opad_needed = 0;
1798         params.sg_len = 0;
1799         req_ctx->hctx_wr.isfinal = 1;
1800         get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1801         params.kctx_len = roundup(params.alg_prm.result_size, 16);
1802         if (is_hmac(crypto_ahash_tfm(rtfm))) {
1803                 params.opad_needed = 1;
1804                 params.kctx_len *= 2;
1805         } else {
1806                 params.opad_needed = 0;
1807         }
1808
1809         req_ctx->hctx_wr.result = 1;
1810         params.bfr_len = req_ctx->reqlen;
1811         req_ctx->data_len += params.bfr_len + params.sg_len;
1812         req_ctx->hctx_wr.srcsg = req->src;
1813         if (req_ctx->reqlen == 0) {
1814                 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1815                 params.last = 0;
1816                 params.more = 1;
1817                 params.scmd1 = 0;
1818                 params.bfr_len = bs;
1819
1820         } else {
1821                 params.scmd1 = req_ctx->data_len;
1822                 params.last = 1;
1823                 params.more = 0;
1824         }
1825         params.hash_size = crypto_ahash_digestsize(rtfm);
1826         skb = create_hash_wr(req, &params);
1827         if (IS_ERR(skb)) {
1828                 error = PTR_ERR(skb);
1829                 goto err;
1830         }
1831         req_ctx->reqlen = 0;
1832         skb->dev = u_ctx->lldi.ports[0];
1833         set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1834         chcr_send_wr(skb);
1835         return -EINPROGRESS;
1836 err:
1837         chcr_dec_wrcount(dev);
1838         return error;
1839 }
1840
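/*
 * chcr_ahash_finup - combined update + final. If the remaining data
 * fits in a single work request it goes out as the last WR; otherwise
 * the first chunk is sent with "more" set and chcr_ahash_continue()
 * takes over from the completion path.
 */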
1841 static int chcr_ahash_finup(struct ahash_request *req)
1842 {
1843         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1844         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1845         struct chcr_dev *dev = h_ctx(rtfm)->dev;
1846         struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1847         struct chcr_context *ctx = h_ctx(rtfm);
1848         struct sk_buff *skb;
1849         struct hash_wr_param params;
1850         u8  bs;
1851         int error;
1852         unsigned int cpu;
1853
1854         cpu = get_cpu();
1855         req_ctx->txqidx = cpu % ctx->ntxq;
1856         req_ctx->rxqidx = cpu % ctx->nrxq;
1857         put_cpu();
1858
1859         bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1860         error = chcr_inc_wrcount(dev);
1861         if (error)
1862                 return -ENXIO;
1863
1864         if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1865                                                 req_ctx->txqidx) &&
1866                 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1867                         error = -ENOSPC;
1868                         goto err;
1869         }
1870         chcr_init_hctx_per_wr(req_ctx);
1871         error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1872         if (error) {
1873                 error = -ENOMEM;
1874                 goto err;
1875         }
1876
1877         get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1878         params.kctx_len = roundup(params.alg_prm.result_size, 16);
1879         if (is_hmac(crypto_ahash_tfm(rtfm))) {
1880                 params.kctx_len *= 2;
1881                 params.opad_needed = 1;
1882         } else {
1883                 params.opad_needed = 0;
1884         }
1885
1886         params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1887                                     HASH_SPACE_LEFT(params.kctx_len), 0);
1888         if (params.sg_len < req->nbytes) {
1889                 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1890                         params.kctx_len /= 2;
1891                         params.opad_needed = 0;
1892                 }
1893                 params.last = 0;
1894                 params.more = 1;
1895                 params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
1896                                         - req_ctx->reqlen;
1897                 params.hash_size = params.alg_prm.result_size;
1898                 params.scmd1 = 0;
1899         } else {
1900                 params.last = 1;
1901                 params.more = 0;
1902                 params.sg_len = req->nbytes;
1903                 params.hash_size = crypto_ahash_digestsize(rtfm);
1904                 params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
1905                                 params.sg_len;
1906         }
1907         params.bfr_len = req_ctx->reqlen;
1908         req_ctx->data_len += params.bfr_len + params.sg_len;
1909         req_ctx->hctx_wr.result = 1;
1910         req_ctx->hctx_wr.srcsg = req->src;
1911         if ((req_ctx->reqlen + req->nbytes) == 0) {
1912                 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1913                 params.last = 0;
1914                 params.more = 1;
1915                 params.scmd1 = 0;
1916                 params.bfr_len = bs;
1917         }
1918         skb = create_hash_wr(req, &params);
1919         if (IS_ERR(skb)) {
1920                 error = PTR_ERR(skb);
1921                 goto unmap;
1922         }
1923         req_ctx->reqlen = 0;
1924         req_ctx->hctx_wr.processed += params.sg_len;
1925         skb->dev = u_ctx->lldi.ports[0];
1926         set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1927         chcr_send_wr(skb);
1928         return -EINPROGRESS;
1929 unmap:
1930         chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1931 err:
1932         chcr_dec_wrcount(dev);
1933         return error;
1934 }
1935
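/*
 * chcr_ahash_digest - one-shot init + update + final over req->src,
 * split across multiple work requests when the scatterlist exceeds the
 * space available in a single WR.
 */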
1936 static int chcr_ahash_digest(struct ahash_request *req)
1937 {
1938         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1939         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1940         struct chcr_dev *dev = h_ctx(rtfm)->dev;
1941         struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1942         struct chcr_context *ctx = h_ctx(rtfm);
1943         struct sk_buff *skb;
1944         struct hash_wr_param params;
1945         u8  bs;
1946         int error;
1947         unsigned int cpu;
1948
1949         cpu = get_cpu();
1950         req_ctx->txqidx = cpu % ctx->ntxq;
1951         req_ctx->rxqidx = cpu % ctx->nrxq;
1952         put_cpu();
1953
1954         rtfm->init(req);
1955         bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1956         error = chcr_inc_wrcount(dev);
1957         if (error)
1958                 return -ENXIO;
1959
1960         if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1961                                                 req_ctx->txqidx) &&
1962                 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1963                         error = -ENOSPC;
1964                         goto err;
1965         }
1966
1967         chcr_init_hctx_per_wr(req_ctx);
1968         error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1969         if (error) {
1970                 error = -ENOMEM;
1971                 goto err;
1972         }
1973
1974         get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1975         params.kctx_len = roundup(params.alg_prm.result_size, 16);
1976         if (is_hmac(crypto_ahash_tfm(rtfm))) {
1977                 params.kctx_len *= 2;
1978                 params.opad_needed = 1;
1979         } else {
1980                 params.opad_needed = 0;
1981         }
1982         params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1983                                 HASH_SPACE_LEFT(params.kctx_len), 0);
1984         if (params.sg_len < req->nbytes) {
1985                 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1986                         params.kctx_len /= 2;
1987                         params.opad_needed = 0;
1988                 }
1989                 params.last = 0;
1990                 params.more = 1;
1991                 params.scmd1 = 0;
1992                 params.sg_len = rounddown(params.sg_len, bs);
1993                 params.hash_size = params.alg_prm.result_size;
1994         } else {
1995                 params.sg_len = req->nbytes;
1996                 params.hash_size = crypto_ahash_digestsize(rtfm);
1997                 params.last = 1;
1998                 params.more = 0;
1999                 params.scmd1 = req->nbytes + req_ctx->data_len;
2000
2001         }
2002         params.bfr_len = 0;
2003         req_ctx->hctx_wr.result = 1;
2004         req_ctx->hctx_wr.srcsg = req->src;
2005         req_ctx->data_len += params.bfr_len + params.sg_len;
2006
2007         if (req->nbytes == 0) {
2008                 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
2009                 params.more = 1;
2010                 params.bfr_len = bs;
2011         }
2012
2013         skb = create_hash_wr(req, &params);
2014         if (IS_ERR(skb)) {
2015                 error = PTR_ERR(skb);
2016                 goto unmap;
2017         }
2018         req_ctx->hctx_wr.processed += params.sg_len;
2019         skb->dev = u_ctx->lldi.ports[0];
2020         set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
2021         chcr_send_wr(skb);
2022         return -EINPROGRESS;
2023 unmap:
2024         chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2025 err:
2026         chcr_dec_wrcount(dev);
2027         return error;
2028 }
2029
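/*
 * chcr_ahash_continue - issue the next work request of a multi-WR hash,
 * resuming from the scatterlist position recorded in hctx_wr. Errors
 * are propagated back to the completion handler that called us.
 */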
2030 static int chcr_ahash_continue(struct ahash_request *req)
2031 {
2032         struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2033         struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2034         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
2035         struct chcr_context *ctx = h_ctx(rtfm);
2036         struct uld_ctx *u_ctx = ULD_CTX(ctx);
2037         struct sk_buff *skb;
2038         struct hash_wr_param params;
2039         u8  bs;
2040         int error;
2041         unsigned int cpu;
2042
2043         cpu = get_cpu();
2044         reqctx->txqidx = cpu % ctx->ntxq;
2045         reqctx->rxqidx = cpu % ctx->nrxq;
2046         put_cpu();
2047
2048         bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2049         get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
2050         params.kctx_len = roundup(params.alg_prm.result_size, 16);
2051         if (is_hmac(crypto_ahash_tfm(rtfm))) {
2052                 params.kctx_len *= 2;
2053                 params.opad_needed = 1;
2054         } else {
2055                 params.opad_needed = 0;
2056         }
2057         params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
2058                                             HASH_SPACE_LEFT(params.kctx_len),
2059                                             hctx_wr->src_ofst);
2060         if ((params.sg_len + hctx_wr->processed) > req->nbytes)
2061                 params.sg_len = req->nbytes - hctx_wr->processed;
2062         if (!hctx_wr->result ||
2063             ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
2064                 if (is_hmac(crypto_ahash_tfm(rtfm))) {
2065                         params.kctx_len /= 2;
2066                         params.opad_needed = 0;
2067                 }
2068                 params.last = 0;
2069                 params.more = 1;
2070                 params.sg_len = rounddown(params.sg_len, bs);
2071                 params.hash_size = params.alg_prm.result_size;
2072                 params.scmd1 = 0;
2073         } else {
2074                 params.last = 1;
2075                 params.more = 0;
2076                 params.hash_size = crypto_ahash_digestsize(rtfm);
2077                 params.scmd1 = reqctx->data_len + params.sg_len;
2078         }
2079         params.bfr_len = 0;
2080         reqctx->data_len += params.sg_len;
2081         skb = create_hash_wr(req, &params);
2082         if (IS_ERR(skb)) {
2083                 error = PTR_ERR(skb);
2084                 goto err;
2085         }
2086         hctx_wr->processed += params.sg_len;
2087         skb->dev = u_ctx->lldi.ports[0];
2088         set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
2089         chcr_send_wr(skb);
2090         return 0;
2091 err:
2092         return error;
2093 }
2094
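/*
 * chcr_handle_ahash_resp - completion handler for hash WRs: copy out
 * the final digest or the updated partial hash, then either complete
 * the request or kick off the next chunk via chcr_ahash_continue().
 */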
2095 static inline void chcr_handle_ahash_resp(struct ahash_request *req,
2096                                           unsigned char *input,
2097                                           int err)
2098 {
2099         struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2100         struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2101         int digestsize, updated_digestsize;
2102         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2103         struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
2104         struct chcr_dev *dev = h_ctx(tfm)->dev;
2105
2106         if (input == NULL)
2107                 goto out;
2108         digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
2109         updated_digestsize = digestsize;
2110         if (digestsize == SHA224_DIGEST_SIZE)
2111                 updated_digestsize = SHA256_DIGEST_SIZE;
2112         else if (digestsize == SHA384_DIGEST_SIZE)
2113                 updated_digestsize = SHA512_DIGEST_SIZE;
2114
2115         if (hctx_wr->dma_addr) {
2116                 dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
2117                                  hctx_wr->dma_len, DMA_TO_DEVICE);
2118                 hctx_wr->dma_addr = 0;
2119         }
2120         if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
2121                                  req->nbytes)) {
2122                 if (hctx_wr->result == 1) {
2123                         hctx_wr->result = 0;
2124                         memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
2125                                digestsize);
2126                 } else {
2127                         memcpy(reqctx->partial_hash,
2128                                input + sizeof(struct cpl_fw6_pld),
2129                                updated_digestsize);
2130
2131                 }
2132                 goto unmap;
2133         }
2134         memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
2135                updated_digestsize);
2136
2137         err = chcr_ahash_continue(req);
2138         if (err)
2139                 goto unmap;
2140         return;
2141 unmap:
2142         if (hctx_wr->is_sg_map)
2143                 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2144
2146 out:
2147         chcr_dec_wrcount(dev);
2148         req->base.complete(&req->base, err);
2149 }
2150
2151 /*
2152  *      chcr_handle_resp - Dispatch completion handling and unmap DMA buffers
2153  *      @req: crypto request
2154  */
2155 int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
2156                          int err)
2157 {
2158         struct crypto_tfm *tfm = req->tfm;
2159         struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2160         struct adapter *adap = padap(ctx->dev);
2161
2162         switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
2163         case CRYPTO_ALG_TYPE_AEAD:
2164                 err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
2165                 break;
2166
2167         case CRYPTO_ALG_TYPE_SKCIPHER:
2168                 chcr_handle_cipher_resp(skcipher_request_cast(req),
2169                                         input, err);
2170                 break;
2171         case CRYPTO_ALG_TYPE_AHASH:
2172                 chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
2173         }
2174         atomic_inc(&adap->chcr_stats.complete);
2175         return err;
2176 }
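
/*
 * chcr_ahash_export/chcr_ahash_import - save and restore the software
 * hash state (buffered bytes, running length, partial hash) so a
 * request can be suspended and resumed later.
 */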
2177 static int chcr_ahash_export(struct ahash_request *areq, void *out)
2178 {
2179         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2180         struct chcr_ahash_req_ctx *state = out;
2181
2182         state->reqlen = req_ctx->reqlen;
2183         state->data_len = req_ctx->data_len;
2184         memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
2185         memcpy(state->partial_hash, req_ctx->partial_hash,
2186                CHCR_HASH_MAX_DIGEST_SIZE);
2187         chcr_init_hctx_per_wr(state);
2188         return 0;
2189 }
2190
2191 static int chcr_ahash_import(struct ahash_request *areq, const void *in)
2192 {
2193         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2194         struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
2195
2196         req_ctx->reqlen = state->reqlen;
2197         req_ctx->data_len = state->data_len;
2198         req_ctx->reqbfr = req_ctx->bfr1;
2199         req_ctx->skbfr = req_ctx->bfr2;
2200         memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
2201         memcpy(req_ctx->partial_hash, state->partial_hash,
2202                CHCR_HASH_MAX_DIGEST_SIZE);
2203         chcr_init_hctx_per_wr(req_ctx);
2204         return 0;
2205 }
2206
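/*
 * chcr_ahash_setkey - derive the HMAC ipad/opad partial hashes from the
 * key with the software shash and store them, reordered for the
 * hardware by chcr_change_order(), in the tfm context.
 */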
2207 static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2208                              unsigned int keylen)
2209 {
2210         struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
2211         unsigned int digestsize = crypto_ahash_digestsize(tfm);
2212         unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2213         unsigned int i, err = 0, updated_digestsize;
2214
2215         SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
2216
2217         /* Use the key to calculate the ipad and opad. The ipad is sent with
2218          * the first request's data and the opad with the final hash result;
2219          * they live in hmacctx->ipad and hmacctx->opad respectively.
2220          */
2221         shash->tfm = hmacctx->base_hash;
2222         if (keylen > bs) {
2223                 err = crypto_shash_digest(shash, key, keylen,
2224                                           hmacctx->ipad);
2225                 if (err)
2226                         goto out;
2227                 keylen = digestsize;
2228         } else {
2229                 memcpy(hmacctx->ipad, key, keylen);
2230         }
2231         memset(hmacctx->ipad + keylen, 0, bs - keylen);
2232         memcpy(hmacctx->opad, hmacctx->ipad, bs);
2233
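        /* XOR in the HMAC pad constants; IPAD_DATA/OPAD_DATA are assumed
         * to be the RFC 2104 bytes 0x36/0x5c repeated across each word.
         */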
2234         for (i = 0; i < bs / sizeof(int); i++) {
2235                 *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
2236                 *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
2237         }
2238
2239         updated_digestsize = digestsize;
2240         if (digestsize == SHA224_DIGEST_SIZE)
2241                 updated_digestsize = SHA256_DIGEST_SIZE;
2242         else if (digestsize == SHA384_DIGEST_SIZE)
2243                 updated_digestsize = SHA512_DIGEST_SIZE;
2244         err = chcr_compute_partial_hash(shash, hmacctx->ipad,
2245                                         hmacctx->ipad, digestsize);
2246         if (err)
2247                 goto out;
2248         chcr_change_order(hmacctx->ipad, updated_digestsize);
2249
2250         err = chcr_compute_partial_hash(shash, hmacctx->opad,
2251                                         hmacctx->opad, digestsize);
2252         if (err)
2253                 goto out;
2254         chcr_change_order(hmacctx->opad, updated_digestsize);
2255 out:
2256         return err;
2257 }
2258
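/*
 * chcr_aes_xts_setkey - program both XTS half-keys, stored back to back
 * in ablkctx->key. 24-byte (AES-192) halves are zero-padded to 32 bytes
 * so that each half starts on a 16-byte boundary.
 */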
2259 static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
2260                                unsigned int key_len)
2261 {
2262         struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
2263         unsigned short context_size = 0;
2264         int err;
2265
2266         err = chcr_cipher_fallback_setkey(cipher, key, key_len);
2267         if (err)
2268                 goto badkey_err;
2269
2270         memcpy(ablkctx->key, key, key_len);
2271         ablkctx->enckey_len = key_len;
2272         get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
2273         context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
2274         /* Both XTS keys must be aligned to a 16-byte boundary by padding
2275          * with zeros, so 24-byte keys get 8 zero bytes of padding each.
2276          */
2277         if (key_len == 48) {
2278                 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len
2279                                 + 16) >> 4;
2280                 memmove(ablkctx->key + 32, ablkctx->key + 24, 24);
2281                 memset(ablkctx->key + 24, 0, 8);
2282                 memset(ablkctx->key + 56, 0, 8);
2283                 ablkctx->enckey_len = 64;
2284                 ablkctx->key_ctx_hdr =
2285                         FILL_KEY_CTX_HDR(CHCR_KEYCTX_CIPHER_KEY_SIZE_192,
2286                                          CHCR_KEYCTX_NO_KEY, 1,
2287                                          0, context_size);
2288         } else {
2289                 ablkctx->key_ctx_hdr =
2290                 FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
2291                                  CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
2292                                  CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
2293                                  CHCR_KEYCTX_NO_KEY, 1,
2294                                  0, context_size);
2295         }
2296         ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
2297         return 0;
2298 badkey_err:
2299         ablkctx->enckey_len = 0;
2300
2301         return err;
2302 }
2303
2304 static int chcr_sha_init(struct ahash_request *areq)
2305 {
2306         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2307         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2308         int digestsize =  crypto_ahash_digestsize(tfm);
2309
2310         req_ctx->data_len = 0;
2311         req_ctx->reqlen = 0;
2312         req_ctx->reqbfr = req_ctx->bfr1;
2313         req_ctx->skbfr = req_ctx->bfr2;
2314         copy_hash_init_values(req_ctx->partial_hash, digestsize);
2315
2316         return 0;
2317 }
2318
2319 static int chcr_sha_cra_init(struct crypto_tfm *tfm)
2320 {
2321         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2322                                  sizeof(struct chcr_ahash_req_ctx));
2323         return chcr_device_init(crypto_tfm_ctx(tfm));
2324 }
2325
2326 static int chcr_hmac_init(struct ahash_request *areq)
2327 {
2328         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2329         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
2330         struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
2331         unsigned int digestsize = crypto_ahash_digestsize(rtfm);
2332         unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2333
2334         chcr_sha_init(areq);
2335         req_ctx->data_len = bs;
2336         if (is_hmac(crypto_ahash_tfm(rtfm))) {
2337                 if (digestsize == SHA224_DIGEST_SIZE)
2338                         memcpy(req_ctx->partial_hash, hmacctx->ipad,
2339                                SHA256_DIGEST_SIZE);
2340                 else if (digestsize == SHA384_DIGEST_SIZE)
2341                         memcpy(req_ctx->partial_hash, hmacctx->ipad,
2342                                SHA512_DIGEST_SIZE);
2343                 else
2344                         memcpy(req_ctx->partial_hash, hmacctx->ipad,
2345                                digestsize);
2346         }
2347         return 0;
2348 }
2349
2350 static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
2351 {
2352         struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2353         struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2354         unsigned int digestsize =
2355                 crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
2356
2357         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2358                                  sizeof(struct chcr_ahash_req_ctx));
2359         hmacctx->base_hash = chcr_alloc_shash(digestsize);
2360         if (IS_ERR(hmacctx->base_hash))
2361                 return PTR_ERR(hmacctx->base_hash);
2362         return chcr_device_init(crypto_tfm_ctx(tfm));
2363 }
2364
2365 static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
2366 {
2367         struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2368         struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2369
2370         if (hmacctx->base_hash) {
2371                 chcr_free_shash(hmacctx->base_hash);
2372                 hmacctx->base_hash = NULL;
2373         }
2374 }
2375
2376 inline void chcr_aead_common_exit(struct aead_request *req)
2377 {
2378         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2379         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2380         struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
2381
2382         chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
2383 }
2384
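/*
 * chcr_aead_common_init - sanity-check the request (key programmed,
 * enough ciphertext to hold the tag on decrypt), point scratch_pad at
 * the area behind the IV when a B0 block (CCM) is present, and DMA-map
 * the request buffers.
 */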
2385 static int chcr_aead_common_init(struct aead_request *req)
2386 {
2387         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2388         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2389         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2390         unsigned int authsize = crypto_aead_authsize(tfm);
2391         int error = -EINVAL;
2392
2393         /* validate key size */
2394         if (aeadctx->enckey_len == 0)
2395                 goto err;
2396         if (reqctx->op && req->cryptlen < authsize)
2397                 goto err;
2398         if (reqctx->b0_len)
2399                 reqctx->scratch_pad = reqctx->iv + IV;
2400         else
2401                 reqctx->scratch_pad = NULL;
2402
2403         error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2404                                   reqctx->op);
2405         if (error) {
2406                 error = -ENOMEM;
2407                 goto err;
2408         }
2409
2410         return 0;
2411 err:
2412         return error;
2413 }
2414
2415 static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
2416                                    int aadmax, int wrlen,
2417                                    unsigned short op_type)
2418 {
2419         unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2420
2421         if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2422             dst_nents > MAX_DSGL_ENT ||
2423             (req->assoclen > aadmax) ||
2424             (wrlen > SGE_MAX_WR_LEN))
2425                 return 1;
2426         return 0;
2427 }
2428
2429 static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2430 {
2431         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2432         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2433         struct aead_request *subreq = aead_request_ctx(req);
2434
2435         aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2436         aead_request_set_callback(subreq, req->base.flags,
2437                                   req->base.complete, req->base.data);
2438         aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2439                                  req->iv);
2440         aead_request_set_ad(subreq, req->assoclen);
2441         return op_type ? crypto_aead_decrypt(subreq) :
2442                 crypto_aead_encrypt(subreq);
2443 }
2444
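/*
 * create_authenc_wr - build the work request for the authenc (cipher
 * plus hash) AEAD modes. Falls back to the software implementation when
 * the destination SGL, AAD size or WR length exceed what a single WR
 * can carry.
 */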
2445 static struct sk_buff *create_authenc_wr(struct aead_request *req,
2446                                          unsigned short qid,
2447                                          int size)
2448 {
2449         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2450         struct chcr_context *ctx = a_ctx(tfm);
2451         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2452         struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2453         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2454         struct sk_buff *skb = NULL;
2455         struct chcr_wr *chcr_req;
2456         struct cpl_rx_phys_dsgl *phys_cpl;
2457         struct ulptx_sgl *ulptx;
2458         unsigned int transhdr_len;
2459         unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
2460         unsigned int   kctx_len = 0, dnents, snents;
2461         unsigned int  authsize = crypto_aead_authsize(tfm);
2462         int error = -EINVAL;
2463         u8 *ivptr;
2464         int null = 0;
2465         gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2466                 GFP_ATOMIC;
2467         struct adapter *adap = padap(ctx->dev);
2468         unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2469
2470         if (req->cryptlen == 0)
2471                 return NULL;
2472
2473         reqctx->b0_len = 0;
2474         error = chcr_aead_common_init(req);
2475         if (error)
2476                 return ERR_PTR(error);
2477
2478         if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
2479                 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2480                 null = 1;
2481         }
2482         dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
2483                 (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
2484         dnents += MIN_AUTH_SG; /* For IV */
2485         snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2486                                CHCR_SRC_SG_SIZE, 0);
2487         dst_size = get_space_for_phys_dsgl(dnents);
2488         kctx_len = (KEY_CONTEXT_CTX_LEN_G(ntohl(aeadctx->key_ctx_hdr)) << 4)
2489                 - sizeof(chcr_req->key_ctx);
2490         transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2491         reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
2492                         SGE_MAX_WR_LEN;
2493         temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
2494                         : (sgl_len(snents) * 8);
2495         transhdr_len += temp;
2496         transhdr_len = roundup(transhdr_len, 16);
2497
2498         if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2499                                     transhdr_len, reqctx->op)) {
2500                 atomic_inc(&adap->chcr_stats.fallback);
2501                 chcr_aead_common_exit(req);
2502                 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2503         }
2504         skb = alloc_skb(transhdr_len, flags);
2505         if (!skb) {
2506                 error = -ENOMEM;
2507                 goto err;
2508         }
2509
2510         chcr_req = __skb_put_zero(skb, transhdr_len);
2511
2512         temp  = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
2513
2514         /*
2515          * Input order is AAD, IV and Payload, where the IV is included as
2516          * part of the authdata. All other fields are filled according
2517          * to the hardware spec.
2518          */
2519         chcr_req->sec_cpl.op_ivinsrtofst =
2520                                 FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
2521         chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
2522         chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2523                                         null ? 0 : 1 + IV,
2524                                         null ? 0 : IV + req->assoclen,
2525                                         req->assoclen + IV + 1,
2526                                         (temp & 0x1F0) >> 4);
2527         chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2528                                         temp & 0xF,
2529                                         null ? 0 : req->assoclen + IV + 1,
2530                                         temp, temp);
2531         if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
2532             subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
2533                 temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
2534         else
2535                 temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
2536         chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
2537                                         (reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
2538                                         temp,
2539                                         actx->auth_mode, aeadctx->hmac_ctrl,
2540                                         IV >> 1);
2541         chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2542                                          0, 0, dst_size);
2543
2544         chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2545         if (reqctx->op == CHCR_ENCRYPT_OP ||
2546                 subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2547                 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
2548                 memcpy(chcr_req->key_ctx.key, aeadctx->key,
2549                        aeadctx->enckey_len);
2550         else
2551                 memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2552                        aeadctx->enckey_len);
2553
2554         memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2555                actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
2556         phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2557         ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2558         ulptx = (struct ulptx_sgl *)(ivptr + IV);
2559         if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2560             subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2561                 memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2562                 memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
2563                                 CTR_RFC3686_IV_SIZE);
2564                 *(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
2565                         CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2566         } else {
2567                 memcpy(ivptr, req->iv, IV);
2568         }
2569         chcr_add_aead_dst_ent(req, phys_cpl, qid);
2570         chcr_add_aead_src_ent(req, ulptx);
2571         atomic_inc(&adap->chcr_stats.cipher_rqst);
2572         temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2573                 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
2574         create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2575                    transhdr_len, temp, 0);
2576         reqctx->skb = skb;
2577
2578         return skb;
2579 err:
2580         chcr_aead_common_exit(req);
2581
2582         return ERR_PTR(error);
2583 }
2584
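/*
 * chcr_aead_dma_map - map the IV (plus any B0 block behind it) and the
 * src/dst scatterlists for DMA; when src == dst the list is mapped once,
 * bidirectionally.
 */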
2585 int chcr_aead_dma_map(struct device *dev,
2586                       struct aead_request *req,
2587                       unsigned short op_type)
2588 {
2589         int error;
2590         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2591         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2592         unsigned int authsize = crypto_aead_authsize(tfm);
2593         int dst_size;
2594
2595         dst_size = req->assoclen + req->cryptlen + (op_type ?
2596                                 0 : authsize);
2597         if (!req->cryptlen || !dst_size)
2598                 return 0;
2599         reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
2600                                         DMA_BIDIRECTIONAL);
2601         if (dma_mapping_error(dev, reqctx->iv_dma))
2602                 return -ENOMEM;
2603         if (reqctx->b0_len)
2604                 reqctx->b0_dma = reqctx->iv_dma + IV;
2605         else
2606                 reqctx->b0_dma = 0;
2607         if (req->src == req->dst) {
2608                 error = dma_map_sg(dev, req->src,
2609                                 sg_nents_for_len(req->src, dst_size),
2610                                         DMA_BIDIRECTIONAL);
2611                 if (!error)
2612                         goto err;
2613         } else {
2614                 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2615                                    DMA_TO_DEVICE);
2616                 if (!error)
2617                         goto err;
2618                 error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2619                                    DMA_FROM_DEVICE);
2620                 if (!error) {
2621                         dma_unmap_sg(dev, req->src, sg_nents(req->src),
2622                                    DMA_TO_DEVICE);
2623                         goto err;
2624                 }
2625         }
2626
2627         return 0;
2628 err:
2629         dma_unmap_single(dev, reqctx->iv_dma, IV + reqctx->b0_len, DMA_BIDIRECTIONAL);
2630         return -ENOMEM;
2631 }
2632
2633 void chcr_aead_dma_unmap(struct device *dev,
2634                          struct aead_request *req,
2635                          unsigned short op_type)
2636 {
2637         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2638         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2639         unsigned int authsize = crypto_aead_authsize(tfm);
2640         int dst_size;
2641
2642         dst_size = req->assoclen + req->cryptlen + (op_type ?
2643                                         0 : authsize);
2644         if (!req->cryptlen || !dst_size)
2645                 return;
2646
2647         dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
2648                                         DMA_BIDIRECTIONAL);
2649         if (req->src == req->dst) {
2650                 dma_unmap_sg(dev, req->src,
2651                              sg_nents_for_len(req->src, dst_size),
2652                              DMA_BIDIRECTIONAL);
2653         } else {
2654                 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2655                                    DMA_TO_DEVICE);
2656                 dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2657                                    DMA_FROM_DEVICE);
2658         }
2659 }
2660
2661 void chcr_add_aead_src_ent(struct aead_request *req,
2662                            struct ulptx_sgl *ulptx)
2663 {
2664         struct ulptx_walk ulp_walk;
2665         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2666
2667         if (reqctx->imm) {
2668                 u8 *buf = (u8 *)ulptx;
2669
2670                 if (reqctx->b0_len) {
2671                         memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2672                         buf += reqctx->b0_len;
2673                 }
2674                 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2675                                    buf, req->cryptlen + req->assoclen, 0);
2676         } else {
2677                 ulptx_walk_init(&ulp_walk, ulptx);
2678                 if (reqctx->b0_len)
2679                         ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2680                                             reqctx->b0_dma);
2681                 ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
2682                                   req->assoclen,  0);
2683                 ulptx_walk_end(&ulp_walk);
2684         }
2685 }
2686
2687 void chcr_add_aead_dst_ent(struct aead_request *req,
2688                            struct cpl_rx_phys_dsgl *phys_cpl,
2689                            unsigned short qid)
2690 {
2691         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2692         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2693         struct dsgl_walk dsgl_walk;
2694         unsigned int authsize = crypto_aead_authsize(tfm);
2695         struct chcr_context *ctx = a_ctx(tfm);
2696         u32 temp;
2697         unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2698
2699         dsgl_walk_init(&dsgl_walk, phys_cpl);
2700         dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
2701         temp = req->assoclen + req->cryptlen +
2702                 (reqctx->op ? -authsize : authsize);
2703         dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
2704         dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2705 }
2706
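/* Emit the cipher source: the IV is always copied inline, then the
 * payload either follows it immediately or is described by a ULPTX
 * SGL. srcsg/src_ofst record where this WR stopped so a request split
 * across several WRs resumes from the right offset.
 */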
2707 void chcr_add_cipher_src_ent(struct skcipher_request *req,
2708                              void *ulptx,
2709                              struct  cipher_wr_param *wrparam)
2710 {
2711         struct ulptx_walk ulp_walk;
2712         struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2713         u8 *buf = ulptx;
2714
2715         memcpy(buf, reqctx->iv, IV);
2716         buf += IV;
2717         if (reqctx->imm) {
2718                 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2719                                    buf, wrparam->bytes, reqctx->processed);
2720         } else {
2721                 ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
2722                 ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2723                                   reqctx->src_ofst);
2724                 reqctx->srcsg = ulp_walk.last_sg;
2725                 reqctx->src_ofst = ulp_walk.last_sg_len;
2726                 ulptx_walk_end(&ulp_walk);
2727         }
2728 }
2729
2730 void chcr_add_cipher_dst_ent(struct skcipher_request *req,
2731                              struct cpl_rx_phys_dsgl *phys_cpl,
2732                              struct  cipher_wr_param *wrparam,
2733                              unsigned short qid)
2734 {
2735         struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2736         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
2737         struct chcr_context *ctx = c_ctx(tfm);
2738         struct dsgl_walk dsgl_walk;
2739         unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2740
2741         dsgl_walk_init(&dsgl_walk, phys_cpl);
2742         dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2743                          reqctx->dst_ofst);
2744         reqctx->dstsg = dsgl_walk.last_sg;
2745         reqctx->dst_ofst = dsgl_walk.last_sg_len;
2746         dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2747 }
2748
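/* Emit the hash source: partially buffered bytes (reqbfr) go first,
 * inline for immediate requests or as a DMA-mapped page in the SGL,
 * followed by sg_len bytes of the request's scatterlist starting at
 * src_ofst.
 */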
2749 void chcr_add_hash_src_ent(struct ahash_request *req,
2750                            struct ulptx_sgl *ulptx,
2751                            struct hash_wr_param *param)
2752 {
2753         struct ulptx_walk ulp_walk;
2754         struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2755
2756         if (reqctx->hctx_wr.imm) {
2757                 u8 *buf = (u8 *)ulptx;
2758
2759                 if (param->bfr_len) {
2760                         memcpy(buf, reqctx->reqbfr, param->bfr_len);
2761                         buf += param->bfr_len;
2762                 }
2763
2764                 sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
2765                                    sg_nents(reqctx->hctx_wr.srcsg), buf,
2766                                    param->sg_len, 0);
2767         } else {
2768                 ulptx_walk_init(&ulp_walk, ulptx);
2769                 if (param->bfr_len)
2770                         ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2771                                             reqctx->hctx_wr.dma_addr);
2772                 ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
2773                                   param->sg_len, reqctx->hctx_wr.src_ofst);
2774                 reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
2775                 reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
2776                 ulptx_walk_end(&ulp_walk);
2777         }
2778 }
2779
2780 int chcr_hash_dma_map(struct device *dev,
2781                       struct ahash_request *req)
2782 {
2783         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2784         int error = 0;
2785
2786         if (!req->nbytes)
2787                 return 0;
2788         error = dma_map_sg(dev, req->src, sg_nents(req->src),
2789                            DMA_TO_DEVICE);
2790         if (!error)
2791                 return -ENOMEM;
2792         req_ctx->hctx_wr.is_sg_map = 1;
2793         return 0;
2794 }
2795
2796 void chcr_hash_dma_unmap(struct device *dev,
2797                          struct ahash_request *req)
2798 {
2799         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2800
2801         if (!req->nbytes)
2802                 return;
2803
2804         dma_unmap_sg(dev, req->src, sg_nents(req->src),
2805                            DMA_TO_DEVICE);
2806         req_ctx->hctx_wr.is_sg_map = 0;
2807
2808 }
2809
2810 int chcr_cipher_dma_map(struct device *dev,
2811                         struct skcipher_request *req)
2812 {
2813         int error;
2814
2815         if (req->src == req->dst) {
2816                 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2817                                    DMA_BIDIRECTIONAL);
2818                 if (!error)
2819                         goto err;
2820         } else {
2821                 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2822                                    DMA_TO_DEVICE);
2823                 if (!error)
2824                         goto err;
2825                 error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2826                                    DMA_FROM_DEVICE);
2827                 if (!error) {
2828                         dma_unmap_sg(dev, req->src, sg_nents(req->src),
2829                                    DMA_TO_DEVICE);
2830                         goto err;
2831                 }
2832         }
2833
2834         return 0;
2835 err:
2836         return -ENOMEM;
2837 }
2838
2839 void chcr_cipher_dma_unmap(struct device *dev,
2840                            struct skcipher_request *req)
2841 {
2842         if (req->src == req->dst) {
2843                 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2844                                    DMA_BIDIRECTIONAL);
2845         } else {
2846                 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2847                                    DMA_TO_DEVICE);
2848                 dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2849                                    DMA_FROM_DEVICE);
2850         }
2851 }
2852
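/* Write the CCM message-length field: zero the csize-byte field, then
 * store msglen big-endian in its low-order bytes. For instance, with
 * csize = 3 and msglen = 260 (0x104) the field becomes 00 01 04.
 * Lengths that do not fit in csize bytes are rejected with
 * -EOVERFLOW.
 */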
2853 static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2854 {
2855         __be32 data;
2856
2857         memset(block, 0, csize);
2858         block += csize;
2859
2860         if (csize >= 4)
2861                 csize = 4;
2862         else if (msglen > (unsigned int)(1 << (8 * csize)))
2863                 return -EOVERFLOW;
2864
2865         data = cpu_to_be32(msglen);
2866         memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2867
2868         return 0;
2869 }
2870
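/* Build the CCM B0 block (RFC 3610) in reqctx->scratch_pad from the
 * formatted IV. The flags byte already carries L' in bits 0-2; this
 * adds (M - 2) / 2 in bits 3-5 and the Adata bit (64) when AAD is
 * present, then stores the message length in the last L = L' + 1
 * bytes. On decrypt the tag is not part of the message, hence
 * cryptlen - m.
 */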
2871 static int generate_b0(struct aead_request *req, u8 *ivptr,
2872                         unsigned short op_type)
2873 {
2874         unsigned int l, lp, m;
2875         int rc;
2876         struct crypto_aead *aead = crypto_aead_reqtfm(req);
2877         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2878         u8 *b0 = reqctx->scratch_pad;
2879
2880         m = crypto_aead_authsize(aead);
2881
2882         memcpy(b0, ivptr, 16);
2883
2884         lp = b0[0];
2885         l = lp + 1;
2886
2887         /* set m, bits 3-5 */
2888         *b0 |= (8 * ((m - 2) / 2));
2889
2890         /* set adata, bit 6, if associated data is used */
2891         if (req->assoclen)
2892                 *b0 |= 64;
2893         rc = set_msg_len(b0 + 16 - l,
2894                          (op_type == CHCR_DECRYPT_OP) ?
2895                          req->cryptlen - m : req->cryptlen, l);
2896
2897         return rc;
2898 }
2899
2900 static inline int crypto_ccm_check_iv(const u8 *iv)
2901 {
2902         /* 2 <= L <= 8, so 1 <= L' <= 7. */
2903         if (iv[0] < 1 || iv[0] > 7)
2904                 return -EINVAL;
2905
2906         return 0;
2907 }
2908
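/* Format the 16-byte CCM IV and the AAD length field. For RFC4309 the
 * nonce is implicit: L' = 3, with a 3-byte salt followed by the 8-byte
 * IV from the request. The 2-byte big-endian AAD length lands just
 * after B0 in scratch_pad, and the counter field (the last L bytes of
 * the IV) is cleared so counting starts from block 0.
 */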
2909 static int ccm_format_packet(struct aead_request *req,
2910                              u8 *ivptr,
2911                              unsigned int sub_type,
2912                              unsigned short op_type,
2913                              unsigned int assoclen)
2914 {
2915         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2916         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2917         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2918         int rc = 0;
2919
2920         if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2921                 ivptr[0] = 3;
2922                 memcpy(ivptr + 1, &aeadctx->salt[0], 3);
2923                 memcpy(ivptr + 4, req->iv, 8);
2924                 memset(ivptr + 12, 0, 4);
2925         } else {
2926                 memcpy(ivptr, req->iv, 16);
2927         }
2928         if (assoclen)
2929                 put_unaligned_be16(assoclen, &reqctx->scratch_pad[16]);
2930
2931         rc = generate_b0(req, ivptr, op_type);
2932         /* zero the ctr value */
2933         memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
2934         return rc;
2935 }
2936
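/* Fill the SEC CPL for CCM. The payload seen by the hardware includes
 * B0 and, when there is AAD, the 2-byte AAD length field; both are
 * accounted for via ccm_xtra. RFC4309 carries the 8-byte IV inside
 * the AAD, so assoclen is trimmed accordingly, and the tag offsets
 * differ between encrypt and decrypt.
 */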
2937 static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2938                                   unsigned int dst_size,
2939                                   struct aead_request *req,
2940                                   unsigned short op_type)
2941 {
2942         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2943         struct chcr_context *ctx = a_ctx(tfm);
2944         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2945         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2946         unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2947         unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2948         unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2949         unsigned int ccm_xtra;
2950         unsigned int tag_offset = 0, auth_offset = 0;
2951         unsigned int assoclen;
2952
2953         if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2954                 assoclen = req->assoclen - 8;
2955         else
2956                 assoclen = req->assoclen;
2957         ccm_xtra = CCM_B0_SIZE +
2958                 ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2959
2960         auth_offset = req->cryptlen ?
2961                 (req->assoclen + IV + 1 + ccm_xtra) : 0;
2962         if (op_type == CHCR_DECRYPT_OP) {
2963                 if (crypto_aead_authsize(tfm) != req->cryptlen)
2964                         tag_offset = crypto_aead_authsize(tfm);
2965                 else
2966                         auth_offset = 0;
2967         }
2968
2969         sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
2970         sec_cpl->pldlen =
2971                 htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
2972         /* For CCM there will always be a B0 block, so AAD start is always 1 */
2973         sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2974                                 1 + IV, IV + assoclen + ccm_xtra,
2975                                 req->assoclen + IV + 1 + ccm_xtra, 0);
2976
2977         sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
2978                                         auth_offset, tag_offset,
2979                                         (op_type == CHCR_ENCRYPT_OP) ? 0 :
2980                                         crypto_aead_authsize(tfm));
2981         sec_cpl->seqno_numivs =  FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2982                                         (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
2983                                         cipher_mode, mac_mode,
2984                                         aeadctx->hmac_ctrl, IV >> 1);
2985
2986         sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
2987                                         0, dst_size);
2988 }
2989
2990 static int aead_ccm_validate_input(unsigned short op_type,
2991                                    struct aead_request *req,
2992                                    struct chcr_aead_ctx *aeadctx,
2993                                    unsigned int sub_type)
2994 {
2995         if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2996                 if (crypto_ccm_check_iv(req->iv)) {
2997                         pr_err("CCM: IV check fails\n");
2998                         return -EINVAL;
2999                 }
3000         } else {
3001                 if (req->assoclen != 16 && req->assoclen != 20) {
3002                         pr_err("RFC4309: Invalid AAD length %u\n",
3003                                req->assoclen);
3004                         return -EINVAL;
3005                 }
3006         }
3007         return 0;
3008 }
3009
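/* Assemble a CCM work request: validate the request, size the source
 * and destination SG lists (IV and B0 get dedicated entries), fall
 * back to software when AAD or WR limits are exceeded, then lay out
 * the key context (the AES key is copied in twice), destination DSGL,
 * IV/B0 and source SGL within a single skb.
 */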
3010 static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
3011                                           unsigned short qid,
3012                                           int size)
3013 {
3014         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3015         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3016         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3017         struct sk_buff *skb = NULL;
3018         struct chcr_wr *chcr_req;
3019         struct cpl_rx_phys_dsgl *phys_cpl;
3020         struct ulptx_sgl *ulptx;
3021         unsigned int transhdr_len;
3022         unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
3023         unsigned int sub_type, assoclen = req->assoclen;
3024         unsigned int authsize = crypto_aead_authsize(tfm);
3025         int error = -EINVAL;
3026         u8 *ivptr;
3027         gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3028                 GFP_ATOMIC;
3029         struct adapter *adap = padap(a_ctx(tfm)->dev);
3030
3031         sub_type = get_aead_subtype(tfm);
3032         if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
3033                 assoclen -= 8;
3034         reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
3035         error = chcr_aead_common_init(req);
3036         if (error)
3037                 return ERR_PTR(error);
3038
3039         error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
3040         if (error)
3041                 goto err;
3042         dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
3043                         + (reqctx->op ? -authsize : authsize),
3044                         CHCR_DST_SG_SIZE, 0);
3045         dnents += MIN_CCM_SG; // For IV and B0
3046         dst_size = get_space_for_phys_dsgl(dnents);
3047         snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3048                                CHCR_SRC_SG_SIZE, 0);
3049         snents += MIN_CCM_SG; //For B0
3050         kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
3051         transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3052         reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
3053                        reqctx->b0_len) <= SGE_MAX_WR_LEN;
3054         temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
3055                                      reqctx->b0_len, 16) :
3056                 (sgl_len(snents) *  8);
3057         transhdr_len += temp;
3058         transhdr_len = roundup(transhdr_len, 16);
3059
3060         if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
3061                                 reqctx->b0_len, transhdr_len, reqctx->op)) {
3062                 atomic_inc(&adap->chcr_stats.fallback);
3063                 chcr_aead_common_exit(req);
3064                 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3065         }
3066         skb = alloc_skb(transhdr_len,  flags);
3067
3068         if (!skb) {
3069                 error = -ENOMEM;
3070                 goto err;
3071         }
3072
3073         chcr_req = __skb_put_zero(skb, transhdr_len);
3074
3075         fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
3076
3077         chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3078         memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3079         memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3080                         aeadctx->key, aeadctx->enckey_len);
3081
3082         phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3083         ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3084         ulptx = (struct ulptx_sgl *)(ivptr + IV);
3085         error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
3086         if (error)
3087                 goto dstmap_fail;
3088         chcr_add_aead_dst_ent(req, phys_cpl, qid);
3089         chcr_add_aead_src_ent(req, ulptx);
3090
3091         atomic_inc(&adap->chcr_stats.aead_rqst);
3092         temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3093                 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
3094                 reqctx->b0_len) : 0);
3095         create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
3096                     transhdr_len, temp, 0);
3097         reqctx->skb = skb;
3098
3099         return skb;
3100 dstmap_fail:
3101         kfree_skb(skb);
3102 err:
3103         chcr_aead_common_exit(req);
3104         return ERR_PTR(error);
3105 }
3106
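/* Assemble a GCM work request. The key context carries the AES key
 * followed by the GHASH hash key H; the 16-byte IV is built as
 * salt | IV | 0x00000001 for RFC4106 or as the caller's 12-byte IV
 * plus the 0x00000001 counter for plain GCM. Oversized requests are
 * handed to the software fallback.
 */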
3107 static struct sk_buff *create_gcm_wr(struct aead_request *req,
3108                                      unsigned short qid,
3109                                      int size)
3110 {
3111         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3112         struct chcr_context *ctx = a_ctx(tfm);
3113         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3114         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
3115         struct sk_buff *skb = NULL;
3116         struct chcr_wr *chcr_req;
3117         struct cpl_rx_phys_dsgl *phys_cpl;
3118         struct ulptx_sgl *ulptx;
3119         unsigned int transhdr_len, dnents = 0, snents;
3120         unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
3121         unsigned int authsize = crypto_aead_authsize(tfm);
3122         int error = -EINVAL;
3123         u8 *ivptr;
3124         gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3125                 GFP_ATOMIC;
3126         struct adapter *adap = padap(ctx->dev);
3127         unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
3128
3129         if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
3130                 assoclen = req->assoclen - 8;
3131
3132         reqctx->b0_len = 0;
3133         error = chcr_aead_common_init(req);
3134         if (error)
3135                 return ERR_PTR(error);
3136         dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
3137                                 (reqctx->op ? -authsize : authsize),
3138                                 CHCR_DST_SG_SIZE, 0);
3139         snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3140                                CHCR_SRC_SG_SIZE, 0);
3141         dnents += MIN_GCM_SG; // For IV
3142         dst_size = get_space_for_phys_dsgl(dnents);
3143         kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
3144         transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3145         reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
3146                         SGE_MAX_WR_LEN;
3147         temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
3148                 (sgl_len(snents) * 8);
3149         transhdr_len += temp;
3150         transhdr_len = roundup(transhdr_len, 16);
3151         if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
3152                             transhdr_len, reqctx->op)) {
3153
3154                 atomic_inc(&adap->chcr_stats.fallback);
3155                 chcr_aead_common_exit(req);
3156                 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3157         }
3158         skb = alloc_skb(transhdr_len, flags);
3159         if (!skb) {
3160                 error = -ENOMEM;
3161                 goto err;
3162         }
3163
3164         chcr_req = __skb_put_zero(skb, transhdr_len);
3165
3166         // Offset of the tag from the end of the payload
3167         temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
3168         chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
3169                                                 rx_channel_id, 2, 1);
3170         chcr_req->sec_cpl.pldlen =
3171                 htonl(req->assoclen + IV + req->cryptlen);
3172         chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
3173                                         assoclen ? 1 + IV : 0,
3174                                         assoclen ? IV + assoclen : 0,
3175                                         req->assoclen + IV + 1, 0);
3176         chcr_req->sec_cpl.cipherstop_lo_authinsert =
3177                         FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
3178                                                 temp, temp);
3179         chcr_req->sec_cpl.seqno_numivs =
3180                         FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
3181                                         CHCR_ENCRYPT_OP) ? 1 : 0,
3182                                         CHCR_SCMD_CIPHER_MODE_AES_GCM,
3183                                         CHCR_SCMD_AUTH_MODE_GHASH,
3184                                         aeadctx->hmac_ctrl, IV >> 1);
3185         chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
3186                                         0, 0, dst_size);
3187         chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3188         memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3189         memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3190                GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
3191
3192         phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3193         ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3194         /* prepare a 16 byte iv */
3195         /* S   A   L  T |  IV | 0x00000001 */
3196         if (get_aead_subtype(tfm) ==
3197             CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
3198                 memcpy(ivptr, aeadctx->salt, 4);
3199                 memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
3200         } else {
3201                 memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
3202         }
3203         put_unaligned_be32(0x01, &ivptr[12]);
3204         ulptx = (struct ulptx_sgl *)(ivptr + 16);
3205
3206         chcr_add_aead_dst_ent(req, phys_cpl, qid);
3207         chcr_add_aead_src_ent(req, ulptx);
3208         atomic_inc(&adap->chcr_stats.aead_rqst);
3209         temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3210                 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
3211         create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
3212                     transhdr_len, temp, reqctx->verify);
3213         reqctx->skb = skb;
3214         return skb;
3215
3216 err:
3217         chcr_aead_common_exit(req);
3218         return ERR_PTR(error);
3219 }
3220
3221
3222
3223 static int chcr_aead_cra_init(struct crypto_aead *tfm)
3224 {
3225         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3226         struct aead_alg *alg = crypto_aead_alg(tfm);
3227
3228         aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
3229                                                CRYPTO_ALG_NEED_FALLBACK |
3230                                                CRYPTO_ALG_ASYNC);
3231         if  (IS_ERR(aeadctx->sw_cipher))
3232                 return PTR_ERR(aeadctx->sw_cipher);
3233         crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
3234                                  sizeof(struct aead_request) +
3235                                  crypto_aead_reqsize(aeadctx->sw_cipher)));
3236         return chcr_device_init(a_ctx(tfm));
3237 }
3238
3239 static void chcr_aead_cra_exit(struct crypto_aead *tfm)
3240 {
3241         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3242
3243         crypto_free_aead(aeadctx->sw_cipher);
3244 }
3245
3246 static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
3247                                         unsigned int authsize)
3248 {
3249         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3250
3251         aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
3252         aeadctx->mayverify = VERIFY_HW;
3253         return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3254 }
3255 static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
3256                                     unsigned int authsize)
3257 {
3258         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3259         u32 maxauth = crypto_aead_maxauthsize(tfm);
3260
3261         /* SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2
3262          * does not hold for SHA1, so the authsize == 12 check must come
3263          * before the authsize == (maxauth >> 1) check.
3264          */
3265         if (authsize == ICV_4) {
3266                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3267                 aeadctx->mayverify = VERIFY_HW;
3268         } else if (authsize == ICV_6) {
3269                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3270                 aeadctx->mayverify = VERIFY_HW;
3271         } else if (authsize == ICV_10) {
3272                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3273                 aeadctx->mayverify = VERIFY_HW;
3274         } else if (authsize == ICV_12) {
3275                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3276                 aeadctx->mayverify = VERIFY_HW;
3277         } else if (authsize == ICV_14) {
3278                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3279                 aeadctx->mayverify = VERIFY_HW;
3280         } else if (authsize == (maxauth >> 1)) {
3281                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3282                 aeadctx->mayverify = VERIFY_HW;
3283         } else if (authsize == maxauth) {
3284                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3285                 aeadctx->mayverify = VERIFY_HW;
3286         } else {
3287                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3288                 aeadctx->mayverify = VERIFY_SW;
3289         }
3290         return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3291 }
3292
3293
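/* The setauthsize handlers map the requested tag length onto a
 * hardware truncation mode (hmac_ctrl). Tag lengths the hardware
 * cannot emit directly are marked VERIFY_SW, in which case the tag is
 * verified on decrypt through the software fallback.
 */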
3294 static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
3295 {
3296         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3297
3298         switch (authsize) {
3299         case ICV_4:
3300                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3301                 aeadctx->mayverify = VERIFY_HW;
3302                 break;
3303         case ICV_8:
3304                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3305                 aeadctx->mayverify = VERIFY_HW;
3306                 break;
3307         case ICV_12:
3308                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3309                 aeadctx->mayverify = VERIFY_HW;
3310                 break;
3311         case ICV_14:
3312                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3313                 aeadctx->mayverify = VERIFY_HW;
3314                 break;
3315         case ICV_16:
3316                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3317                 aeadctx->mayverify = VERIFY_HW;
3318                 break;
3319         case ICV_13:
3320         case ICV_15:
3321                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3322                 aeadctx->mayverify = VERIFY_SW;
3323                 break;
3324         default:
3325                 return -EINVAL;
3326         }
3327         return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3328 }
3329
3330 static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
3331                                           unsigned int authsize)
3332 {
3333         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3334
3335         switch (authsize) {
3336         case ICV_8:
3337                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3338                 aeadctx->mayverify = VERIFY_HW;
3339                 break;
3340         case ICV_12:
3341                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3342                 aeadctx->mayverify = VERIFY_HW;
3343                 break;
3344         case ICV_16:
3345                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3346                 aeadctx->mayverify = VERIFY_HW;
3347                 break;
3348         default:
3349                 return -EINVAL;
3350         }
3351         return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3352 }
3353
3354 static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
3355                                 unsigned int authsize)
3356 {
3357         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3358
3359         switch (authsize) {
3360         case ICV_4:
3361                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3362                 aeadctx->mayverify = VERIFY_HW;
3363                 break;
3364         case ICV_6:
3365                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3366                 aeadctx->mayverify = VERIFY_HW;
3367                 break;
3368         case ICV_8:
3369                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3370                 aeadctx->mayverify = VERIFY_HW;
3371                 break;
3372         case ICV_10:
3373                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3374                 aeadctx->mayverify = VERIFY_HW;
3375                 break;
3376         case ICV_12:
3377                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3378                 aeadctx->mayverify = VERIFY_HW;
3379                 break;
3380         case ICV_14:
3381                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3382                 aeadctx->mayverify = VERIFY_HW;
3383                 break;
3384         case ICV_16:
3385                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3386                 aeadctx->mayverify = VERIFY_HW;
3387                 break;
3388         default:
3389                 return -EINVAL;
3390         }
3391         return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3392 }
3393
3394 static int chcr_ccm_common_setkey(struct crypto_aead *aead,
3395                                 const u8 *key,
3396                                 unsigned int keylen)
3397 {
3398         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3399         unsigned char ck_size, mk_size;
3400         int key_ctx_size = 0;
3401
3402         key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
3403         if (keylen == AES_KEYSIZE_128) {
3404                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3405                 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
3406         } else if (keylen == AES_KEYSIZE_192) {
3407                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3408                 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
3409         } else if (keylen == AES_KEYSIZE_256) {
3410                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3411                 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
3412         } else {
3413                 aeadctx->enckey_len = 0;
3414                 return  -EINVAL;
3415         }
3416         aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
3417                                                 key_ctx_size >> 4);
3418         memcpy(aeadctx->key, key, keylen);
3419         aeadctx->enckey_len = keylen;
3420
3421         return 0;
3422 }
3423
3424 static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
3425                                 const u8 *key,
3426                                 unsigned int keylen)
3427 {
3428         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3429         int error;
3430
3431         crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3432         crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3433                               CRYPTO_TFM_REQ_MASK);
3434         error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3435         if (error)
3436                 return error;
3437         return chcr_ccm_common_setkey(aead, key, keylen);
3438 }
3439
3440 static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
3441                                     unsigned int keylen)
3442 {
3443         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3444         int error;
3445
3446         if (keylen < 3) {
3447                 aeadctx->enckey_len = 0;
3448                 return  -EINVAL;
3449         }
3450         crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3451         crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3452                               CRYPTO_TFM_REQ_MASK);
3453         error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3454         if (error)
3455                 return error;
3456         keylen -= 3;
3457         memcpy(aeadctx->salt, key + keylen, 3);
3458         return chcr_ccm_common_setkey(aead, key, keylen);
3459 }
3460
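/* Program a GCM key. RFC4106 keys carry a 4-byte nonce salt at the
 * end, which is peeled off first; the GHASH hash key
 * H = AES-K(0^128) is then precomputed so it can be copied into the
 * key context next to the AES key.
 */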
3461 static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3462                            unsigned int keylen)
3463 {
3464         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3465         struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
3466         unsigned int ck_size;
3467         int ret = 0, key_ctx_size = 0;
3468         struct crypto_aes_ctx aes;
3469
3470         aeadctx->enckey_len = 0;
3471         crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3472         crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
3473                               & CRYPTO_TFM_REQ_MASK);
3474         ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3475         if (ret)
3476                 goto out;
3477
3478         if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3479             keylen > 3) {
3480                 keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
3481                 memcpy(aeadctx->salt, key + keylen, 4);
3482         }
3483         if (keylen == AES_KEYSIZE_128) {
3484                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3485         } else if (keylen == AES_KEYSIZE_192) {
3486                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3487         } else if (keylen == AES_KEYSIZE_256) {
3488                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3489         } else {
3490                 pr_err("GCM: Invalid key length %u\n", keylen);
3491                 ret = -EINVAL;
3492                 goto out;
3493         }
3494
3495         memcpy(aeadctx->key, key, keylen);
3496         aeadctx->enckey_len = keylen;
3497         key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
3498                 AEAD_H_SIZE;
3499         aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
3500                                                 CHCR_KEYCTX_MAC_KEY_SIZE_128,
3501                                                 0, 0,
3502                                                 key_ctx_size >> 4);
3503         /* Calculate the H = CIPH(K, 0 repeated 16 times).
3504          * It will go in key context
3505          */
3506         ret = aes_expandkey(&aes, key, keylen);
3507         if (ret) {
3508                 aeadctx->enckey_len = 0;
3509                 goto out;
3510         }
3511         memset(gctx->ghash_h, 0, AEAD_H_SIZE);
3512         aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
3513         memzero_explicit(&aes, sizeof(aes));
3514
3515 out:
3516         return ret;
3517 }
3518
3519 static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3520                                    unsigned int keylen)
3521 {
3522         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3523         struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3524         /* the key blob contains both the authentication and cipher keys */
3525         struct crypto_authenc_keys keys;
3526         unsigned int bs, subtype;
3527         unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
3528         int err = 0, i, key_ctx_len = 0;
3529         unsigned char ck_size = 0;
3530         unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
3531         struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
3532         struct algo_param param;
3533         int align;
3534         u8 *o_ptr = NULL;
3535
3536         crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3537         crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3538                               & CRYPTO_TFM_REQ_MASK);
3539         err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3540         if (err)
3541                 goto out;
3542
3543         if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3544                 goto out;
3545
3546         if (get_alg_config(&param, max_authsize)) {
3547                 pr_err("Unsupported digest size\n");
3548                 goto out;
3549         }
3550         subtype = get_aead_subtype(authenc);
3551         if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3552                 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3553                 if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3554                         goto out;
3555                 memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3556                 - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3557                 keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3558         }
3559         if (keys.enckeylen == AES_KEYSIZE_128) {
3560                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3561         } else if (keys.enckeylen == AES_KEYSIZE_192) {
3562                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3563         } else if (keys.enckeylen == AES_KEYSIZE_256) {
3564                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3565         } else {
3566                 pr_err("Unsupported cipher key\n");
3567                 goto out;
3568         }
3569
3570         /* Copy only the encryption key. We use authkey to generate h(ipad)
3571          * and h(opad), so authkey is not needed again. authkeylen equals
3572          * the hash digest size.
3573          */
3574         memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3575         aeadctx->enckey_len = keys.enckeylen;
3576         if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3577                 subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3578
3579                 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3580                             aeadctx->enckey_len << 3);
3581         }
3582         base_hash  = chcr_alloc_shash(max_authsize);
3583         if (IS_ERR(base_hash)) {
3584                 pr_err("Base driver cannot be loaded\n");
3585                 aeadctx->enckey_len = 0;
3586                 memzero_explicit(&keys, sizeof(keys));
3587                 return -EINVAL;
3588         }
3589         {
3590                 SHASH_DESC_ON_STACK(shash, base_hash);
3591
3592                 shash->tfm = base_hash;
3593                 bs = crypto_shash_blocksize(base_hash);
3594                 align = KEYCTX_ALIGN_PAD(max_authsize);
3595                 o_ptr =  actx->h_iopad + param.result_size + align;
3596
3597                 if (keys.authkeylen > bs) {
3598                         err = crypto_shash_digest(shash, keys.authkey,
3599                                                   keys.authkeylen,
3600                                                   o_ptr);
3601                         if (err) {
3602                                 pr_err("Digest of the auth key failed\n");
3603                                 goto out;
3604                         }
3605                         keys.authkeylen = max_authsize;
3606                 } else
3607                         memcpy(o_ptr, keys.authkey, keys.authkeylen);
3608
3609                 /* Compute the ipad-digest*/
3610                 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3611                 memcpy(pad, o_ptr, keys.authkeylen);
3612                 for (i = 0; i < bs >> 2; i++)
3613                         *((unsigned int *)pad + i) ^= IPAD_DATA;
3614
3615                 if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
3616                                               max_authsize))
3617                         goto out;
3618                 /* Compute the opad-digest */
3619                 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3620                 memcpy(pad, o_ptr, keys.authkeylen);
3621                 for (i = 0; i < bs >> 2; i++)
3622                         *((unsigned int *)pad + i) ^= OPAD_DATA;
3623
3624                 if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3625                         goto out;
3626
3627                 /* convert the ipad and opad digest to network order */
3628                 chcr_change_order(actx->h_iopad, param.result_size);
3629                 chcr_change_order(o_ptr, param.result_size);
3630                 key_ctx_len = sizeof(struct _key_ctx) +
3631                         roundup(keys.enckeylen, 16) +
3632                         (param.result_size + align) * 2;
3633                 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3634                                                 0, 1, key_ctx_len >> 4);
3635                 actx->auth_mode = param.auth_mode;
3636                 chcr_free_shash(base_hash);
3637
3638                 memzero_explicit(&keys, sizeof(keys));
3639                 return 0;
3640         }
3641 out:
3642         aeadctx->enckey_len = 0;
3643         memzero_explicit(&keys, sizeof(keys));
3644         if (!IS_ERR(base_hash))
3645                 chcr_free_shash(base_hash);
3646         return -EINVAL;
3647 }
3648
3649 static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3650                                         const u8 *key, unsigned int keylen)
3651 {
3652         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3653         struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3654         struct crypto_authenc_keys keys;
3655         int err;
3656         /* the key blob contains both the authentication and cipher keys */
3657         unsigned int subtype;
3658         int key_ctx_len = 0;
3659         unsigned char ck_size = 0;
3660
3661         crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3662         crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3663                               & CRYPTO_TFM_REQ_MASK);
3664         err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3665         if (err)
3666                 goto out;
3667
3668         if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3669                 goto out;
3670
3671         subtype = get_aead_subtype(authenc);
3672         if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3673             subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3674                 if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3675                         goto out;
3676                 memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3677                         - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3678                 keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3679         }
3680         if (keys.enckeylen == AES_KEYSIZE_128) {
3681                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3682         } else if (keys.enckeylen == AES_KEYSIZE_192) {
3683                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3684         } else if (keys.enckeylen == AES_KEYSIZE_256) {
3685                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3686         } else {
3687                 pr_err("Unsupported cipher key length %u\n", keys.enckeylen);
3688                 goto out;
3689         }
3690         memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3691         aeadctx->enckey_len = keys.enckeylen;
3692         if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3693             subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3694                 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3695                                 aeadctx->enckey_len << 3);
3696         }
3697         key_ctx_len =  sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
3698
3699         aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3700                                                 0, key_ctx_len >> 4);
3701         actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3702         memzero_explicit(&keys, sizeof(keys));
3703         return 0;
3704 out:
3705         aeadctx->enckey_len = 0;
3706         memzero_explicit(&keys, sizeof(keys));
3707         return -EINVAL;
3708 }
3709
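/* Common AEAD submission path: require a live crypto device, account
 * the in-flight WR (falling back to software if the device has
 * detached), honour CRYPTO_TFM_REQ_MAY_BACKLOG against a full queue,
 * then build the WR via create_wr_fn and post it. -EINPROGRESS
 * signals asynchronous completion.
 */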
3710 static int chcr_aead_op(struct aead_request *req,
3711                         int size,
3712                         create_wr_t create_wr_fn)
3713 {
3714         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3715         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
3716         struct chcr_context *ctx = a_ctx(tfm);
3717         struct uld_ctx *u_ctx = ULD_CTX(ctx);
3718         struct sk_buff *skb;
3719         struct chcr_dev *cdev;
3720
3721         cdev = a_ctx(tfm)->dev;
3722         if (!cdev) {
3723                 pr_err("%s: No crypto device\n", __func__);
3724                 return -ENXIO;
3725         }
3726
3727         if (chcr_inc_wrcount(cdev)) {
3728                 /* CHCR in the detached state means lldi or padap has been
3729                  * freed, so the fallback counter cannot be bumped here.
3730                  */
3731                 return chcr_aead_fallback(req, reqctx->op);
3732         }
3733
3734         if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
3735                                         reqctx->txqidx) &&
3736                 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) {
3737                         chcr_dec_wrcount(cdev);
3738                         return -ENOSPC;
3739         }
3740
3741         if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3742             crypto_ipsec_check_assoclen(req->assoclen) != 0) {
3743                 pr_err("RFC4106: Invalid value of assoclen %u\n", req->assoclen);
3744                 chcr_dec_wrcount(cdev);
3745                 return -EINVAL;
3746         }
3747
3748         /* Form a WR from req */
3749         skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);
3750
3751         if (IS_ERR_OR_NULL(skb)) {
3752                 chcr_dec_wrcount(cdev);
3753                 return PTR_ERR_OR_ZERO(skb);
3754         }
3755
3756         skb->dev = u_ctx->lldi.ports[0];
3757         set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
3758         chcr_send_wr(skb);
3759         return -EINPROGRESS;
3760 }
3761
3762 static int chcr_aead_encrypt(struct aead_request *req)
3763 {
3764         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3765         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3766         struct chcr_context *ctx = a_ctx(tfm);
3767         unsigned int cpu;
3768
3769         cpu = get_cpu();
3770         reqctx->txqidx = cpu % ctx->ntxq;
3771         reqctx->rxqidx = cpu % ctx->nrxq;
3772         put_cpu();
3773
3774         reqctx->verify = VERIFY_HW;
3775         reqctx->op = CHCR_ENCRYPT_OP;
3776
3777         switch (get_aead_subtype(tfm)) {
3778         case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3779         case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3780         case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3781         case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3782                 return chcr_aead_op(req, 0, create_authenc_wr);
3783         case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3784         case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3785                 return chcr_aead_op(req, 0, create_aead_ccm_wr);
3786         default:
3787                 return chcr_aead_op(req, 0, create_gcm_wr);
3788         }
3789 }
3790
3791 static int chcr_aead_decrypt(struct aead_request *req)
3792 {
3793         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3794         struct chcr_context *ctx = a_ctx(tfm);
3795         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3796         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3797         int size;
3798         unsigned int cpu;
3799
3800         cpu = get_cpu();
3801         reqctx->txqidx = cpu % ctx->ntxq;
3802         reqctx->rxqidx = cpu % ctx->nrxq;
3803         put_cpu();
3804
3805         if (aeadctx->mayverify == VERIFY_SW) {
3806                 size = crypto_aead_maxauthsize(tfm);
3807                 reqctx->verify = VERIFY_SW;
3808         } else {
3809                 size = 0;
3810                 reqctx->verify = VERIFY_HW;
3811         }
3812         reqctx->op = CHCR_DECRYPT_OP;
3813         switch (get_aead_subtype(tfm)) {
3814         case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3815         case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3816         case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3817         case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3818                 return chcr_aead_op(req, size, create_authenc_wr);
3819         case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3820         case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3821                 return chcr_aead_op(req, size, create_aead_ccm_wr);
3822         default:
3823                 return chcr_aead_op(req, size, create_gcm_wr);
3824         }
3825 }
3826
3827 static struct chcr_alg_template driver_algs[] = {
3828         /* AES-CBC */
3829         {
3830                 .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
3831                 .is_registered = 0,
3832                 .alg.skcipher = {
3833                         .base.cra_name          = "cbc(aes)",
3834                         .base.cra_driver_name   = "cbc-aes-chcr",
3835                         .base.cra_blocksize     = AES_BLOCK_SIZE,
3836
3837                         .init                   = chcr_init_tfm,
3838                         .exit                   = chcr_exit_tfm,
3839                         .min_keysize            = AES_MIN_KEY_SIZE,
3840                         .max_keysize            = AES_MAX_KEY_SIZE,
3841                         .ivsize                 = AES_BLOCK_SIZE,
3842                         .setkey                 = chcr_aes_cbc_setkey,
3843                         .encrypt                = chcr_aes_encrypt,
3844                         .decrypt                = chcr_aes_decrypt,
3845                         }
3846         },
3847         {
3848                 .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3849                 .is_registered = 0,
3850                 .alg.skcipher = {
3851                         .base.cra_name          = "xts(aes)",
3852                         .base.cra_driver_name   = "xts-aes-chcr",
3853                         .base.cra_blocksize     = AES_BLOCK_SIZE,
3854
3855                         .init                   = chcr_init_tfm,
3856                         .exit                   = chcr_exit_tfm,
3857                         .min_keysize            = 2 * AES_MIN_KEY_SIZE,
3858                         .max_keysize            = 2 * AES_MAX_KEY_SIZE,
3859                         .ivsize                 = AES_BLOCK_SIZE,
3860                         .setkey                 = chcr_aes_xts_setkey,
3861                         .encrypt                = chcr_aes_encrypt,
3862                         .decrypt                = chcr_aes_decrypt,
3863                         }
3864         },
3865         {
3866                 .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
3867                 .is_registered = 0,
3868                 .alg.skcipher = {
3869                         .base.cra_name          = "ctr(aes)",
3870                         .base.cra_driver_name   = "ctr-aes-chcr",
3871                         .base.cra_blocksize     = 1,
3872
3873                         .init                   = chcr_init_tfm,
3874                         .exit                   = chcr_exit_tfm,
3875                         .min_keysize            = AES_MIN_KEY_SIZE,
3876                         .max_keysize            = AES_MAX_KEY_SIZE,
3877                         .ivsize                 = AES_BLOCK_SIZE,
                        .setkey                 = chcr_aes_ctr_setkey,
                        .encrypt                = chcr_aes_encrypt,
                        .decrypt                = chcr_aes_decrypt,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_SKCIPHER |
                        CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
                .is_registered = 0,
                .alg.skcipher = {
                        .base.cra_name          = "rfc3686(ctr(aes))",
                        .base.cra_driver_name   = "rfc3686-ctr-aes-chcr",
                        .base.cra_blocksize     = 1,

                        .init                   = chcr_rfc3686_init,
                        .exit                   = chcr_exit_tfm,
                        .min_keysize            = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
                        .max_keysize            = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
                        .ivsize                 = CTR_RFC3686_IV_SIZE,
                        .setkey                 = chcr_aes_rfc3686_setkey,
                        .encrypt                = chcr_aes_encrypt,
                        .decrypt                = chcr_aes_decrypt,
                }
        },
        /* SHA */
        {
                .type = CRYPTO_ALG_TYPE_AHASH,
                .is_registered = 0,
                .alg.hash = {
                        .halg.digestsize = SHA1_DIGEST_SIZE,
                        .halg.base = {
                                .cra_name = "sha1",
                                .cra_driver_name = "sha1-chcr",
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                        }
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AHASH,
                .is_registered = 0,
                .alg.hash = {
                        .halg.digestsize = SHA256_DIGEST_SIZE,
                        .halg.base = {
                                .cra_name = "sha256",
                                .cra_driver_name = "sha256-chcr",
                                .cra_blocksize = SHA256_BLOCK_SIZE,
                        }
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AHASH,
                .is_registered = 0,
                .alg.hash = {
                        .halg.digestsize = SHA224_DIGEST_SIZE,
                        .halg.base = {
                                .cra_name = "sha224",
                                .cra_driver_name = "sha224-chcr",
                                .cra_blocksize = SHA224_BLOCK_SIZE,
                        }
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AHASH,
                .is_registered = 0,
                .alg.hash = {
                        .halg.digestsize = SHA384_DIGEST_SIZE,
                        .halg.base = {
                                .cra_name = "sha384",
                                .cra_driver_name = "sha384-chcr",
                                .cra_blocksize = SHA384_BLOCK_SIZE,
                        }
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AHASH,
                .is_registered = 0,
                .alg.hash = {
                        .halg.digestsize = SHA512_DIGEST_SIZE,
                        .halg.base = {
                                .cra_name = "sha512",
                                .cra_driver_name = "sha512-chcr",
                                .cra_blocksize = SHA512_BLOCK_SIZE,
                        }
                }
        },
        /* HMAC */
        {
                .type = CRYPTO_ALG_TYPE_HMAC,
                .is_registered = 0,
                .alg.hash = {
                        .halg.digestsize = SHA1_DIGEST_SIZE,
                        .halg.base = {
                                .cra_name = "hmac(sha1)",
                                .cra_driver_name = "hmac-sha1-chcr",
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                        }
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_HMAC,
                .is_registered = 0,
                .alg.hash = {
                        .halg.digestsize = SHA224_DIGEST_SIZE,
                        .halg.base = {
                                .cra_name = "hmac(sha224)",
                                .cra_driver_name = "hmac-sha224-chcr",
                                .cra_blocksize = SHA224_BLOCK_SIZE,
                        }
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_HMAC,
                .is_registered = 0,
                .alg.hash = {
                        .halg.digestsize = SHA256_DIGEST_SIZE,
                        .halg.base = {
                                .cra_name = "hmac(sha256)",
                                .cra_driver_name = "hmac-sha256-chcr",
                                .cra_blocksize = SHA256_BLOCK_SIZE,
                        }
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_HMAC,
                .is_registered = 0,
                .alg.hash = {
                        .halg.digestsize = SHA384_DIGEST_SIZE,
                        .halg.base = {
                                .cra_name = "hmac(sha384)",
                                .cra_driver_name = "hmac-sha384-chcr",
                                .cra_blocksize = SHA384_BLOCK_SIZE,
                        }
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_HMAC,
                .is_registered = 0,
                .alg.hash = {
                        .halg.digestsize = SHA512_DIGEST_SIZE,
                        .halg.base = {
                                .cra_name = "hmac(sha512)",
                                .cra_driver_name = "hmac-sha512-chcr",
                                .cra_blocksize = SHA512_BLOCK_SIZE,
                        }
                }
        },
        /* AEAD */
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "gcm(aes)",
                                .cra_driver_name = "gcm-aes-chcr",
                                .cra_blocksize = 1,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_gcm_ctx),
                        },
                        .ivsize = GCM_AES_IV_SIZE,
                        .maxauthsize = GHASH_DIGEST_SIZE,
                        .setkey = chcr_gcm_setkey,
                        .setauthsize = chcr_gcm_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "rfc4106(gcm(aes))",
                                .cra_driver_name = "rfc4106-gcm-aes-chcr",
                                .cra_blocksize = 1,
                                .cra_priority = CHCR_AEAD_PRIORITY + 1,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_gcm_ctx),
                        },
                        .ivsize = GCM_RFC4106_IV_SIZE,
                        .maxauthsize = GHASH_DIGEST_SIZE,
                        .setkey = chcr_gcm_setkey,
                        .setauthsize = chcr_4106_4309_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "ccm(aes)",
                                .cra_driver_name = "ccm-aes-chcr",
                                .cra_blocksize = 1,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx),
                        },
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = GHASH_DIGEST_SIZE,
                        .setkey = chcr_aead_ccm_setkey,
                        .setauthsize = chcr_ccm_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "rfc4309(ccm(aes))",
                                .cra_driver_name = "rfc4309-ccm-aes-chcr",
                                .cra_blocksize = 1,
                                .cra_priority = CHCR_AEAD_PRIORITY + 1,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx),
                        },
                        .ivsize = 8,
                        .maxauthsize = GHASH_DIGEST_SIZE,
                        .setkey = chcr_aead_rfc4309_setkey,
                        .setauthsize = chcr_4106_4309_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha1),cbc(aes))",
                                .cra_driver_name =
                                        "authenc-hmac-sha1-cbc-aes-chcr",
                                .cra_blocksize = AES_BLOCK_SIZE,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                        .setkey = chcr_authenc_setkey,
                        .setauthsize = chcr_authenc_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha256),cbc(aes))",
                                .cra_driver_name =
                                        "authenc-hmac-sha256-cbc-aes-chcr",
                                .cra_blocksize = AES_BLOCK_SIZE,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                        .setkey = chcr_authenc_setkey,
                        .setauthsize = chcr_authenc_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha224),cbc(aes))",
                                .cra_driver_name =
                                        "authenc-hmac-sha224-cbc-aes-chcr",
                                .cra_blocksize = AES_BLOCK_SIZE,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA224_DIGEST_SIZE,
                        .setkey = chcr_authenc_setkey,
                        .setauthsize = chcr_authenc_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha384),cbc(aes))",
                                .cra_driver_name =
                                        "authenc-hmac-sha384-cbc-aes-chcr",
                                .cra_blocksize = AES_BLOCK_SIZE,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
                        .setkey = chcr_authenc_setkey,
                        .setauthsize = chcr_authenc_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha512),cbc(aes))",
                                .cra_driver_name =
                                        "authenc-hmac-sha512-cbc-aes-chcr",
                                .cra_blocksize = AES_BLOCK_SIZE,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                        .setkey = chcr_authenc_setkey,
                        .setauthsize = chcr_authenc_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(digest_null,cbc(aes))",
                                .cra_driver_name =
                                        "authenc-digest_null-cbc-aes-chcr",
                                .cra_blocksize = AES_BLOCK_SIZE,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = 0,
                        .setkey = chcr_aead_digest_null_setkey,
                        .setauthsize = chcr_authenc_null_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
                                .cra_driver_name =
                                        "authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
                                .cra_blocksize = 1,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                        .setkey = chcr_authenc_setkey,
                        .setauthsize = chcr_authenc_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
                                .cra_driver_name =
                                        "authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
                                .cra_blocksize = 1,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                        .setkey = chcr_authenc_setkey,
                        .setauthsize = chcr_authenc_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
                                .cra_driver_name =
                                        "authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
                                .cra_blocksize = 1,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA224_DIGEST_SIZE,
                        .setkey = chcr_authenc_setkey,
                        .setauthsize = chcr_authenc_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
                                .cra_driver_name =
                                        "authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
                                .cra_blocksize = 1,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
                        .setkey = chcr_authenc_setkey,
                        .setauthsize = chcr_authenc_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
                                .cra_driver_name =
                                        "authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
                                .cra_blocksize = 1,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                        .setkey = chcr_authenc_setkey,
                        .setauthsize = chcr_authenc_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
                                .cra_driver_name =
                                        "authenc-digest_null-rfc3686-ctr-aes-chcr",
                                .cra_blocksize = 1,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = 0,
                        .setkey = chcr_aead_digest_null_setkey,
                        .setauthsize = chcr_authenc_null_setauthsize,
                }
        },
};
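
/*
 * Illustrative sketch, not part of the driver: how a kernel consumer
 * reaches the table above through the generic crypto API. The string
 * passed to crypto_alloc_aead() must match a .cra_name entry such as
 * "gcm(aes)"; the crypto core then selects the highest-priority
 * registered implementation, which may be this driver once
 * start_crypto() has run. The helper name is hypothetical.
 */
static int __maybe_unused example_alloc_gcm(void)
{
        struct crypto_aead *tfm;

        /* Resolves against .cra_name strings from driver_algs[]. */
        tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);
        crypto_free_aead(tfm);
        return 0;
}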

/*
 *      chcr_unregister_alg - Deregister crypto algorithms with the
 *      kernel framework.
 */
static int chcr_unregister_alg(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
                /*
                 * HMAC entries carry CRYPTO_ALG_TYPE_AHASH in their low
                 * type bits, so the mask routes them through the ahash
                 * case below.
                 */
                switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
                case CRYPTO_ALG_TYPE_SKCIPHER:
                        if (driver_algs[i].is_registered)
                                crypto_unregister_skcipher(
                                                &driver_algs[i].alg.skcipher);
                        break;
                case CRYPTO_ALG_TYPE_AEAD:
                        if (driver_algs[i].is_registered)
                                crypto_unregister_aead(
                                                &driver_algs[i].alg.aead);
                        break;
                case CRYPTO_ALG_TYPE_AHASH:
                        if (driver_algs[i].is_registered)
                                crypto_unregister_ahash(
                                                &driver_algs[i].alg.hash);
                        break;
                }
                driver_algs[i].is_registered = 0;
        }
        return 0;
}

#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)

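/*
 * Illustrative sketch, not part of the driver: SZ_AHASH_REQ_CTX is
 * installed as halg.statesize during registration, so a caller sizing
 * an export/import buffer can query it through the generic API rather
 * than hard-coding the struct size. "example_hash_state_bytes" is a
 * hypothetical helper, shown only for clarity.
 */
static unsigned int __maybe_unused example_hash_state_bytes(void)
{
        struct crypto_ahash *tfm;
        unsigned int bytes;

        tfm = crypto_alloc_ahash("sha256-chcr", 0, 0);
        if (IS_ERR(tfm))
                return 0;
        /* Matches the statesize set below in chcr_register_alg(). */
        bytes = crypto_ahash_statesize(tfm);
        crypto_free_ahash(tfm);
        return bytes;
}
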
/*
 *      chcr_register_alg - Register crypto algorithms with the kernel
 *      framework.
 */
static int chcr_register_alg(void)
{
        struct ahash_alg *a_hash;
        int err = 0, i;
        char *name = NULL;

        for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
                if (driver_algs[i].is_registered)
                        continue;
                switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
                case CRYPTO_ALG_TYPE_SKCIPHER:
                        driver_algs[i].alg.skcipher.base.cra_priority =
                                CHCR_CRA_PRIORITY;
                        driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE;
                        driver_algs[i].alg.skcipher.base.cra_flags =
                                CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
                                CRYPTO_ALG_NEED_FALLBACK;
                        driver_algs[i].alg.skcipher.base.cra_ctxsize =
                                sizeof(struct chcr_context) +
                                sizeof(struct ablk_ctx);
                        driver_algs[i].alg.skcipher.base.cra_alignmask = 0;

                        err = crypto_register_skcipher(&driver_algs[i].alg.skcipher);
                        name = driver_algs[i].alg.skcipher.base.cra_driver_name;
                        break;
                case CRYPTO_ALG_TYPE_AEAD:
                        driver_algs[i].alg.aead.base.cra_flags =
                                CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
                        driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
                        driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
                        driver_algs[i].alg.aead.init = chcr_aead_cra_init;
                        driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
                        driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
                        err = crypto_register_aead(&driver_algs[i].alg.aead);
                        name = driver_algs[i].alg.aead.base.cra_driver_name;
                        break;
                case CRYPTO_ALG_TYPE_AHASH:
                        a_hash = &driver_algs[i].alg.hash;
                        a_hash->update = chcr_ahash_update;
                        a_hash->final = chcr_ahash_final;
                        a_hash->finup = chcr_ahash_finup;
                        a_hash->digest = chcr_ahash_digest;
                        a_hash->export = chcr_ahash_export;
                        a_hash->import = chcr_ahash_import;
                        a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
                        a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
                        a_hash->halg.base.cra_module = THIS_MODULE;
                        a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
                        a_hash->halg.base.cra_alignmask = 0;
                        a_hash->halg.base.cra_exit = NULL;

                        if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
                                a_hash->halg.base.cra_init = chcr_hmac_cra_init;
                                a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
                                a_hash->init = chcr_hmac_init;
                                a_hash->setkey = chcr_ahash_setkey;
                                a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
                        } else {
                                a_hash->init = chcr_sha_init;
                                a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
                                a_hash->halg.base.cra_init = chcr_sha_cra_init;
                        }
                        err = crypto_register_ahash(&driver_algs[i].alg.hash);
                        name = driver_algs[i].alg.hash.halg.base.cra_driver_name;
                        break;
                }
                if (err) {
                        /* pr_fmt() already prefixes "chcr:". */
                        pr_err("%s: Algorithm registration failed\n", name);
                        goto register_err;
                }
                driver_algs[i].is_registered = 1;
        }
        return 0;

register_err:
        chcr_unregister_alg();
        return err;
}
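
/*
 * Illustrative sketch, not part of the driver: the skciphers above are
 * registered with CRYPTO_ALG_NEED_FALLBACK, so their init hooks are
 * expected to obtain a software fallback. Passing the flag in the
 * allocation mask selects only implementations that do *not* themselves
 * need a fallback, so the request cannot resolve back to this driver.
 * "example_get_fallback" is a hypothetical helper.
 */
static __maybe_unused struct crypto_skcipher *
example_get_fallback(const char *cra_name)
{
        return crypto_alloc_skcipher(cra_name, 0, CRYPTO_ALG_NEED_FALLBACK);
}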

/*
 *      start_crypto - Register the crypto algorithms.
 *      This should be called once when the first device comes up. After
 *      this the kernel will start calling the driver APIs for crypto
 *      operations.
 */
int start_crypto(void)
{
        return chcr_register_alg();
}

/*
 *      stop_crypto - Deregister all the crypto algorithms with the kernel.
 *      This should be called once when the last device goes down. After
 *      this the kernel will not call the driver APIs for crypto
 *      operations.
 */
int stop_crypto(void)
{
        chcr_unregister_alg();
        return 0;
}