/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *      Manoj Malviya (manojmalviya@chelsio.com)
 *      Atul Gupta (atul.gupta@chelsio.com)
 *      Jitendra Lulla (jlulla@chelsio.com)
 *      Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *      Harsh Jain (harsh@chelsio.com)
 */

#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/gcm.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "t4fw_api.h"
#include "t4_msg.h"
#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

#define IV AES_BLOCK_SIZE

static unsigned int sgl_ent_len[] = {
        0, 0, 16, 24, 40, 48, 64, 72, 88,
        96, 112, 120, 136, 144, 160, 168, 184,
        192, 208, 216, 232, 240, 256, 264, 280,
        288, 304, 312, 328, 336, 352, 360, 376
};

static unsigned int dsgl_ent_len[] = {
        0, 32, 32, 48, 48, 64, 64, 80, 80,
        112, 112, 128, 128, 144, 144, 160, 160,
        192, 192, 208, 208, 224, 224, 240, 240,
        272, 272, 288, 288, 304, 304, 320, 320
};

static u32 round_constant[11] = {
        0x01000000, 0x02000000, 0x04000000, 0x08000000,
        0x10000000, 0x20000000, 0x40000000, 0x80000000,
        0x1B000000, 0x36000000, 0x6C000000
};
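
/*
 * round_constant[] holds the AES key-schedule round constants (Rcon =
 * 0x01, 0x02, 0x04, ..., 0x36, 0x6C) pre-shifted into the most
 * significant byte, matching the big-endian key words that
 * get_aes_decrypt_key() below loads with get_unaligned_be32().
 */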

static int chcr_handle_cipher_resp(struct skcipher_request *req,
                                   unsigned char *input, int err);

static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
        return ctx->crypto_ctx->aeadctx;
}

static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
        return ctx->crypto_ctx->ablkctx;
}

static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
        return ctx->crypto_ctx->hmacctx;
}

static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
{
        return gctx->ctx->gcm;
}

static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
{
        return gctx->ctx->authenc;
}

static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
        return container_of(ctx->dev, struct uld_ctx, dev);
}

static inline int is_ofld_imm(const struct sk_buff *skb)
{
        return (skb->len <= SGE_MAX_WR_LEN);
}
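
/*
 * A work request that fits within SGE_MAX_WR_LEN can carry its payload
 * as immediate data inside the WR itself instead of referencing it
 * through a separate gather list.
 */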

static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
{
        memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
}

static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
                         unsigned int entlen,
                         unsigned int skip)
{
        int nents = 0;
        unsigned int less;
        unsigned int skip_len = 0;

        while (sg && skip) {
                if (sg_dma_len(sg) <= skip) {
                        skip -= sg_dma_len(sg);
                        skip_len = 0;
                        sg = sg_next(sg);
                } else {
                        skip_len = skip;
                        skip = 0;
                }
        }

        while (sg && reqlen) {
                less = min(reqlen, sg_dma_len(sg) - skip_len);
                nents += DIV_ROUND_UP(less, entlen);
                reqlen -= less;
                skip_len = 0;
                sg = sg_next(sg);
        }
        return nents;
}
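
/*
 * Worked example: with entlen == 2048, a single 5000-byte DMA segment
 * contributes DIV_ROUND_UP(5000, 2048) = 3 hardware SG entries, since
 * each entry can address at most entlen bytes.
 */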

static inline int get_aead_subtype(struct crypto_aead *aead)
{
        struct aead_alg *alg = crypto_aead_alg(aead);
        struct chcr_alg_template *chcr_crypto_alg =
                container_of(alg, struct chcr_alg_template, alg.aead);
        return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
        u8 temp[SHA512_DIGEST_SIZE];
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        int authsize = crypto_aead_authsize(tfm);
        struct cpl_fw6_pld *fw6_pld;
        int cmp = 0;

        fw6_pld = (struct cpl_fw6_pld *)input;
        if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
            (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
                cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
        } else {
                sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
                                authsize, req->assoclen +
                                req->cryptlen - authsize);
                cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
        }
        if (cmp)
                *err = -EBADMSG;
        else
                *err = 0;
}

static int chcr_inc_wrcount(struct chcr_dev *dev)
{
        if (dev->state == CHCR_DETACH)
                return 1;
        atomic_inc(&dev->inflight);
        return 0;
}

static inline void chcr_dec_wrcount(struct chcr_dev *dev)
{
        atomic_dec(&dev->inflight);
}

static inline int chcr_handle_aead_resp(struct aead_request *req,
                                         unsigned char *input,
                                         int err)
{
        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct chcr_dev *dev = a_ctx(tfm)->dev;

        chcr_aead_common_exit(req);
        if (reqctx->verify == VERIFY_SW) {
                chcr_verify_tag(req, input, &err);
                reqctx->verify = VERIFY_HW;
        }
        chcr_dec_wrcount(dev);
        req->base.complete(&req->base, err);

        return err;
}

static void get_aes_decrypt_key(unsigned char *dec_key,
                                       const unsigned char *key,
                                       unsigned int keylength)
{
        u32 temp;
        u32 w_ring[MAX_NK];
        int i, j, k;
        u8  nr, nk;

        switch (keylength) {
        case AES_KEYLENGTH_128BIT:
                nk = KEYLENGTH_4BYTES;
                nr = NUMBER_OF_ROUNDS_10;
                break;
        case AES_KEYLENGTH_192BIT:
                nk = KEYLENGTH_6BYTES;
                nr = NUMBER_OF_ROUNDS_12;
                break;
        case AES_KEYLENGTH_256BIT:
                nk = KEYLENGTH_8BYTES;
                nr = NUMBER_OF_ROUNDS_14;
                break;
        default:
                return;
        }
        for (i = 0; i < nk; i++)
                w_ring[i] = get_unaligned_be32(&key[i * 4]);

        i = 0;
        temp = w_ring[nk - 1];
        while (i + nk < (nr + 1) * 4) {
                if (!(i % nk)) {
                        /* RotWord(temp) */
                        temp = (temp << 8) | (temp >> 24);
                        temp = aes_ks_subword(temp);
                        temp ^= round_constant[i / nk];
                } else if (nk == 8 && (i % 4 == 0)) {
                        temp = aes_ks_subword(temp);
                }
                w_ring[i % nk] ^= temp;
                temp = w_ring[i % nk];
                i++;
        }
        i--;
        for (k = 0, j = i % nk; k < nk; k++) {
                put_unaligned_be32(w_ring[j], &dec_key[k * 4]);
                j--;
                if (j < 0)
                        j += nk;
        }
}
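
/*
 * get_aes_decrypt_key() runs the standard forward AES key expansion but
 * keeps only a rolling window of the last nk round-key words in
 * w_ring[]; once the schedule completes, those final-round words are
 * emitted in reverse order so the hardware can start decryption from
 * the last round without expanding the key itself.
 */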

static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
        struct crypto_shash *base_hash = ERR_PTR(-EINVAL);

        switch (ds) {
        case SHA1_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha1", 0, 0);
                break;
        case SHA224_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha224", 0, 0);
                break;
        case SHA256_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha256", 0, 0);
                break;
        case SHA384_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha384", 0, 0);
                break;
        case SHA512_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha512", 0, 0);
                break;
        }

        return base_hash;
}

static int chcr_compute_partial_hash(struct shash_desc *desc,
                                     char *iopad, char *result_hash,
                                     int digest_size)
{
        struct sha1_state sha1_st;
        struct sha256_state sha256_st;
        struct sha512_state sha512_st;
        int error;

        if (digest_size == SHA1_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha1_st);
                memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
        } else if (digest_size == SHA224_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha256_st);
                memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
        } else if (digest_size == SHA256_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha256_st);
                memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
        } else if (digest_size == SHA384_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha512_st);
                memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
        } else if (digest_size == SHA512_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha512_st);
                memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
        } else {
                error = -EINVAL;
                pr_err("Unknown digest size %d\n", digest_size);
        }
        return error;
}

static void chcr_change_order(char *buf, int ds)
{
        int i;

        if (ds == SHA512_DIGEST_SIZE) {
                for (i = 0; i < (ds / sizeof(u64)); i++)
                        *((__be64 *)buf + i) =
                                cpu_to_be64(*((u64 *)buf + i));
        } else {
                for (i = 0; i < (ds / sizeof(u32)); i++)
                        *((__be32 *)buf + i) =
                                cpu_to_be32(*((u32 *)buf + i));
        }
}
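
/*
 * crypto_shash_export() returns the partial digest as host-endian state
 * words; the crypto engine expects them big-endian, so chcr_change_order()
 * byte-swaps each 32-bit (or, for SHA-384/512, 64-bit) word in place.
 */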

static inline int is_hmac(struct crypto_tfm *tfm)
{
        struct crypto_alg *alg = tfm->__crt_alg;
        struct chcr_alg_template *chcr_crypto_alg =
                container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
                             alg.hash);
        if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
                return 1;
        return 0;
}

static inline void dsgl_walk_init(struct dsgl_walk *walk,
                                   struct cpl_rx_phys_dsgl *dsgl)
{
        walk->dsgl = dsgl;
        walk->nents = 0;
        walk->to = (struct phys_sge_pairs *)(dsgl + 1);
}

static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
                                 int pci_chan_id)
{
        struct cpl_rx_phys_dsgl *phys_cpl;

        phys_cpl = walk->dsgl;

        phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
                                    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
        phys_cpl->pcirlxorder_to_noofsgentr =
                htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
                      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
                      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
                      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
                      CPL_RX_PHYS_DSGL_DCAID_V(0) |
                      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
        phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
        phys_cpl->rss_hdr_int.qid = htons(qid);
        phys_cpl->rss_hdr_int.hash_val = 0;
        phys_cpl->rss_hdr_int.channel = pci_chan_id;
}

static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
                                        size_t size,
                                        dma_addr_t addr)
{
        int j;

        if (!size)
                return;
        j = walk->nents;
        walk->to->len[j % 8] = htons(size);
        walk->to->addr[j % 8] = cpu_to_be64(addr);
        j++;
        if ((j % 8) == 0)
                walk->to++;
        walk->nents = j;
}
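
/*
 * Each struct phys_sge_pairs carries eight {len, addr} slots, which is
 * why the walkers index with j % 8 and advance walk->to to the next
 * block only after the eighth entry has been filled.
 */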

static void dsgl_walk_add_sg(struct dsgl_walk *walk,
                             struct scatterlist *sg,
                             unsigned int slen,
                             unsigned int skip)
{
        int skip_len = 0;
        unsigned int left_size = slen, len = 0;
        unsigned int j = walk->nents;
        int offset, ent_len;

        if (!slen)
                return;
        while (sg && skip) {
                if (sg_dma_len(sg) <= skip) {
                        skip -= sg_dma_len(sg);
                        skip_len = 0;
                        sg = sg_next(sg);
                } else {
                        skip_len = skip;
                        skip = 0;
                }
        }

        while (left_size && sg) {
                len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
                offset = 0;
                while (len) {
                        ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
                        walk->to->len[j % 8] = htons(ent_len);
                        walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
                                                      offset + skip_len);
                        offset += ent_len;
                        len -= ent_len;
                        j++;
                        if ((j % 8) == 0)
                                walk->to++;
                }
                walk->last_sg = sg;
                walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
                                          skip_len) + skip_len;
                left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
                skip_len = 0;
                sg = sg_next(sg);
        }
        walk->nents = j;
}

static inline void ulptx_walk_init(struct ulptx_walk *walk,
                                   struct ulptx_sgl *ulp)
{
        walk->sgl = ulp;
        walk->nents = 0;
        walk->pair_idx = 0;
        walk->pair = ulp->sge;
        walk->last_sg = NULL;
        walk->last_sg_len = 0;
}

static inline void ulptx_walk_end(struct ulptx_walk *walk)
{
        walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
                              ULPTX_NSGE_V(walk->nents));
}

static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
                                        size_t size,
                                        dma_addr_t addr)
{
        if (!size)
                return;

        if (walk->nents == 0) {
                walk->sgl->len0 = cpu_to_be32(size);
                walk->sgl->addr0 = cpu_to_be64(addr);
        } else {
                walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
                walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
                walk->pair_idx = !walk->pair_idx;
                if (!walk->pair_idx)
                        walk->pair++;
        }
        walk->nents++;
}

static void ulptx_walk_add_sg(struct ulptx_walk *walk,
                              struct scatterlist *sg,
                              unsigned int len,
                              unsigned int skip)
{
        int small;
        int skip_len = 0;
        unsigned int sgmin;

        if (!len)
                return;
        while (sg && skip) {
                if (sg_dma_len(sg) <= skip) {
                        skip -= sg_dma_len(sg);
                        skip_len = 0;
                        sg = sg_next(sg);
                } else {
                        skip_len = skip;
                        skip = 0;
                }
        }
        WARN(!sg, "SG should not be null here\n");
        if (sg && (walk->nents == 0)) {
                small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
                sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
                walk->sgl->len0 = cpu_to_be32(sgmin);
                walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
                walk->nents++;
                len -= sgmin;
                walk->last_sg = sg;
                walk->last_sg_len = sgmin + skip_len;
                skip_len += sgmin;
                if (sg_dma_len(sg) == skip_len) {
                        sg = sg_next(sg);
                        skip_len = 0;
                }
        }

        while (sg && len) {
                small = min(sg_dma_len(sg) - skip_len, len);
                sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
                walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
                walk->pair->addr[walk->pair_idx] =
                        cpu_to_be64(sg_dma_address(sg) + skip_len);
                walk->pair_idx = !walk->pair_idx;
                walk->nents++;
                if (!walk->pair_idx)
                        walk->pair++;
                len -= sgmin;
                skip_len += sgmin;
                walk->last_sg = sg;
                walk->last_sg_len = skip_len;
                if (sg_dma_len(sg) == skip_len) {
                        sg = sg_next(sg);
                        skip_len = 0;
                }
        }
}

static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm)
{
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
        struct chcr_alg_template *chcr_crypto_alg =
                container_of(alg, struct chcr_alg_template, alg.skcipher);

        return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
        struct adapter *adap = netdev2adap(dev);
        struct sge_uld_txq_info *txq_info =
                adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
        struct sge_uld_txq *txq;
        int ret = 0;

        local_bh_disable();
        txq = &txq_info->uldtxq[idx];
        spin_lock(&txq->sendq.lock);
        if (txq->full)
                ret = -1;
        spin_unlock(&txq->sendq.lock);
        local_bh_enable();
        return ret;
}

static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
                               struct _key_ctx *key_ctx)
{
        if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
                memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
        } else {
                memcpy(key_ctx->key,
                       ablkctx->key + (ablkctx->enckey_len >> 1),
                       ablkctx->enckey_len >> 1);
                memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
                       ablkctx->rrkey, ablkctx->enckey_len >> 1);
        }
        return 0;
}
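
/*
 * For CBC decryption the key context carries only the reverse round key
 * computed at setkey time. For dual-key modes (e.g. XTS), enckey_len
 * spans both halves: the second (tweak) half of the raw key goes first,
 * followed by the reverse round key derived from the first half.
 */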

static int chcr_hash_ent_in_wr(struct scatterlist *src,
                             unsigned int minsg,
                             unsigned int space,
                             unsigned int srcskip)
{
        int srclen = 0;
        int srcsg = minsg;
        int soffset = 0, sless;

        if (sg_dma_len(src) == srcskip) {
                src = sg_next(src);
                srcskip = 0;
        }
        while (src && space > (sgl_ent_len[srcsg + 1])) {
                sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
                                                        CHCR_SRC_SG_SIZE);
                srclen += sless;
                soffset += sless;
                srcsg++;
                if (sg_dma_len(src) == (soffset + srcskip)) {
                        src = sg_next(src);
                        soffset = 0;
                        srcskip = 0;
                }
        }
        return srclen;
}

static int chcr_sg_ent_in_wr(struct scatterlist *src,
                             struct scatterlist *dst,
                             unsigned int minsg,
                             unsigned int space,
                             unsigned int srcskip,
                             unsigned int dstskip)
{
        int srclen = 0, dstlen = 0;
        int srcsg = minsg, dstsg = minsg;
        int offset = 0, soffset = 0, less, sless = 0;

        if (sg_dma_len(src) == srcskip) {
                src = sg_next(src);
                srcskip = 0;
        }
        if (sg_dma_len(dst) == dstskip) {
                dst = sg_next(dst);
                dstskip = 0;
        }

        while (src && dst &&
               space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
                sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
                                CHCR_SRC_SG_SIZE);
                srclen += sless;
                srcsg++;
                offset = 0;
                while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
                       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
                        if (srclen <= dstlen)
                                break;
                        less = min_t(unsigned int, sg_dma_len(dst) - offset -
                                     dstskip, CHCR_DST_SG_SIZE);
                        dstlen += less;
                        offset += less;
                        if ((offset + dstskip) == sg_dma_len(dst)) {
                                dst = sg_next(dst);
                                offset = 0;
                        }
                        dstsg++;
                        dstskip = 0;
                }
                soffset += sless;
                if ((soffset + srcskip) == sg_dma_len(src)) {
                        src = sg_next(src);
                        srcskip = 0;
                        soffset = 0;
                }
        }
        return min(srclen, dstlen);
}
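
/*
 * sgl_ent_len[n] and dsgl_ent_len[n] (defined at the top of this file)
 * give the accumulated wire size, in bytes, of a source ULPTX gather
 * list and a destination PHYS_DSGL with n entries; chcr_hash_ent_in_wr()
 * and chcr_sg_ent_in_wr() use them to count how many payload bytes still
 * fit in the 'space' remaining in a work request.
 */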

static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
                                struct skcipher_request *req,
                                u8 *iv,
                                unsigned short op_type)
{
        struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
        int err;

        skcipher_request_set_tfm(&reqctx->fallback_req, cipher);
        skcipher_request_set_callback(&reqctx->fallback_req, req->base.flags,
                                      req->base.complete, req->base.data);
        skcipher_request_set_crypt(&reqctx->fallback_req, req->src, req->dst,
                                   req->cryptlen, iv);

        err = op_type ? crypto_skcipher_decrypt(&reqctx->fallback_req) :
                        crypto_skcipher_encrypt(&reqctx->fallback_req);

        return err;
}

static inline int get_qidxs(struct crypto_async_request *req,
                            unsigned int *txqidx, unsigned int *rxqidx)
{
        struct crypto_tfm *tfm = req->tfm;
        int ret = 0;

        switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_AEAD:
        {
                struct aead_request *aead_req =
                        container_of(req, struct aead_request, base);
                struct chcr_aead_reqctx *reqctx = aead_request_ctx(aead_req);
                *txqidx = reqctx->txqidx;
                *rxqidx = reqctx->rxqidx;
                break;
        }
        case CRYPTO_ALG_TYPE_SKCIPHER:
        {
                struct skcipher_request *sk_req =
                        container_of(req, struct skcipher_request, base);
                struct chcr_skcipher_req_ctx *reqctx =
                        skcipher_request_ctx(sk_req);
                *txqidx = reqctx->txqidx;
                *rxqidx = reqctx->rxqidx;
                break;
        }
        case CRYPTO_ALG_TYPE_AHASH:
        {
                struct ahash_request *ahash_req =
                        container_of(req, struct ahash_request, base);
                struct chcr_ahash_req_ctx *reqctx =
                        ahash_request_ctx(ahash_req);
                *txqidx = reqctx->txqidx;
                *rxqidx = reqctx->rxqidx;
                break;
        }
        default:
                ret = -EINVAL;
                /* should never get here */
                BUG();
                break;
        }
        return ret;
}

static inline void create_wreq(struct chcr_context *ctx,
                               struct chcr_wr *chcr_req,
                               struct crypto_async_request *req,
                               unsigned int imm,
                               int hash_sz,
                               unsigned int len16,
                               unsigned int sc_len,
                               unsigned int lcb)
{
        struct uld_ctx *u_ctx = ULD_CTX(ctx);
        unsigned int tx_channel_id, rx_channel_id;
        unsigned int txqidx = 0, rxqidx = 0;
        unsigned int qid, fid;

        get_qidxs(req, &txqidx, &rxqidx);
        qid = u_ctx->lldi.rxq_ids[rxqidx];
        fid = u_ctx->lldi.rxq_ids[0];
        tx_channel_id = txqidx / ctx->txq_perchan;
        rx_channel_id = rxqidx / ctx->rxq_perchan;

        chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
        chcr_req->wreq.pld_size_hash_size =
                htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
        chcr_req->wreq.len16_pkd =
                htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
        chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
        chcr_req->wreq.rx_chid_to_rx_q_id = FILL_WR_RX_Q_ID(rx_channel_id, qid,
                                                            !!lcb, txqidx);

        chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(tx_channel_id, fid);
        chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
                                ((sizeof(chcr_req->wreq)) >> 4)));
        chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
        chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
                                           sizeof(chcr_req->key_ctx) + sc_len);
}

/**
 *      create_cipher_wr - form the WR for cipher operations
 *      @wrparam: cipher work request parameters: the skcipher request,
 *                the ingress qid where the response of this WR should be
 *                received, and the number of bytes to process.
 */
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
        struct chcr_context *ctx = c_ctx(tfm);
        struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
        struct sk_buff *skb = NULL;
        struct chcr_wr *chcr_req;
        struct cpl_rx_phys_dsgl *phys_cpl;
        struct ulptx_sgl *ulptx;
        struct chcr_skcipher_req_ctx *reqctx =
                skcipher_request_ctx(wrparam->req);
        unsigned int temp = 0, transhdr_len, dst_size;
        int error;
        int nents;
        unsigned int kctx_len;
        gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                        GFP_KERNEL : GFP_ATOMIC;
        struct adapter *adap = padap(ctx->dev);
        unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;

        nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
                              reqctx->dst_ofst);
        dst_size = get_space_for_phys_dsgl(nents);
        kctx_len = roundup(ablkctx->enckey_len, 16);
        transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
        nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
                                  CHCR_SRC_SG_SIZE, reqctx->src_ofst);
        temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
                                     (sgl_len(nents) * 8);
        transhdr_len += temp;
        transhdr_len = roundup(transhdr_len, 16);
        skb = alloc_skb(SGE_MAX_WR_LEN, flags);
        if (!skb) {
                error = -ENOMEM;
                goto err;
        }
        chcr_req = __skb_put_zero(skb, transhdr_len);
        chcr_req->sec_cpl.op_ivinsrtofst =
                        FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);

        chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
        chcr_req->sec_cpl.aadstart_cipherstop_hi =
                        FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);

        chcr_req->sec_cpl.cipherstop_lo_authinsert =
                        FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
        chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
                                                         ablkctx->ciph_mode,
                                                         0, 0, IV >> 1);
        chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
                                                          0, 1, dst_size);

        chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
        if ((reqctx->op == CHCR_DECRYPT_OP) &&
            (!(get_cryptoalg_subtype(tfm) ==
               CRYPTO_ALG_SUB_TYPE_CTR)) &&
            (!(get_cryptoalg_subtype(tfm) ==
               CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
                generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
        } else {
                if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
                    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
                        memcpy(chcr_req->key_ctx.key, ablkctx->key,
                               ablkctx->enckey_len);
                } else {
                        memcpy(chcr_req->key_ctx.key, ablkctx->key +
                               (ablkctx->enckey_len >> 1),
                               ablkctx->enckey_len >> 1);
                        memcpy(chcr_req->key_ctx.key +
                               (ablkctx->enckey_len >> 1),
                               ablkctx->key,
                               ablkctx->enckey_len >> 1);
                }
        }
        phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
        ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
        chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
        chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);

        atomic_inc(&adap->chcr_stats.cipher_rqst);
        temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
                + (reqctx->imm ? (wrparam->bytes) : 0);
        create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
                    transhdr_len, temp,
                        ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
        reqctx->skb = skb;

        if (reqctx->op && (ablkctx->ciph_mode ==
                           CHCR_SCMD_CIPHER_MODE_AES_CBC))
                sg_pcopy_to_buffer(wrparam->req->src,
                        sg_nents(wrparam->req->src), wrparam->req->iv, 16,
                        reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);

        return skb;
err:
        return ERR_PTR(error);
}

static inline int chcr_keyctx_ck_size(unsigned int keylen)
{
        int ck_size = 0;

        if (keylen == AES_KEYSIZE_128)
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
        else if (keylen == AES_KEYSIZE_192)
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
        else if (keylen == AES_KEYSIZE_256)
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
        else
                ck_size = 0;

        return ck_size;
}

static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
                                       const u8 *key,
                                       unsigned int keylen)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));

        crypto_skcipher_clear_flags(ablkctx->sw_cipher,
                                CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(ablkctx->sw_cipher,
                                cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
        return crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
}

static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
                               const u8 *key,
                               unsigned int keylen)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned int ck_size, context_size;
        u16 alignment = 0;
        int err;

        err = chcr_cipher_fallback_setkey(cipher, key, keylen);
        if (err)
                goto badkey_err;

        ck_size = chcr_keyctx_ck_size(keylen);
        alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
        memcpy(ablkctx->key, key, keylen);
        ablkctx->enckey_len = keylen;
        get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
                        keylen + alignment) >> 4;

        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
                                                0, 0, context_size);
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
        return 0;
badkey_err:
        ablkctx->enckey_len = 0;

        return err;
}

static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
                                   const u8 *key,
                                   unsigned int keylen)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned int ck_size, context_size;
        u16 alignment = 0;
        int err;

        err = chcr_cipher_fallback_setkey(cipher, key, keylen);
        if (err)
                goto badkey_err;
        ck_size = chcr_keyctx_ck_size(keylen);
        alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
        memcpy(ablkctx->key, key, keylen);
        ablkctx->enckey_len = keylen;
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
                        keylen + alignment) >> 4;

        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
                                                0, 0, context_size);
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

        return 0;
badkey_err:
        ablkctx->enckey_len = 0;

        return err;
}

static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
                                   const u8 *key,
                                   unsigned int keylen)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned int ck_size, context_size;
        u16 alignment = 0;
        int err;

        if (keylen < CTR_RFC3686_NONCE_SIZE)
                return -EINVAL;
        memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
               CTR_RFC3686_NONCE_SIZE);

        keylen -= CTR_RFC3686_NONCE_SIZE;
        err = chcr_cipher_fallback_setkey(cipher, key, keylen);
        if (err)
                goto badkey_err;

        ck_size = chcr_keyctx_ck_size(keylen);
        alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
        memcpy(ablkctx->key, key, keylen);
        ablkctx->enckey_len = keylen;
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
                        keylen + alignment) >> 4;

        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
                                                0, 0, context_size);
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

        return 0;
badkey_err:
        ablkctx->enckey_len = 0;

        return err;
}

static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
{
        unsigned int size = AES_BLOCK_SIZE;
        __be32 *b = (__be32 *)(dstiv + size);
        u32 c, prev;

        memcpy(dstiv, srciv, AES_BLOCK_SIZE);
        for (; size >= 4; size -= 4) {
                prev = be32_to_cpu(*--b);
                c = prev + add;
                *b = cpu_to_be32(c);
                if (prev < c)
                        break;
                add = 1;
        }
}

static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
{
        __be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
        u64 c;
        u32 temp = be32_to_cpu(*--b);

        temp = ~temp;
        c = (u64)temp + 1; /* number of blocks that can be processed
                            * without overflowing the low counter word
                            */
        if ((bytes / AES_BLOCK_SIZE) >= c)
                bytes = c * AES_BLOCK_SIZE;
        return bytes;
}
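
/*
 * Worked example: if the low 32-bit counter word of the IV is 0xfffffffe,
 * then ~temp + 1 == 2, i.e. only two more AES blocks fit before that word
 * wraps, so a larger request is clamped to 32 bytes here and the rest is
 * carried by a follow-up WR once the IV has been recomputed.
 */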

static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
                             u32 isfinal)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
        struct crypto_aes_ctx aes;
        int ret, i;
        u8 *key;
        unsigned int keylen;
        int round = reqctx->last_req_len / AES_BLOCK_SIZE;
        int round8 = round / 8;

        memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);

        keylen = ablkctx->enckey_len / 2;
        key = ablkctx->key + keylen;
        /* For a 192 bit key remove the padded zeroes which were
         * added in chcr_xts_setkey
         */
        if (KEY_CONTEXT_CK_SIZE_G(ntohl(ablkctx->key_ctx_hdr))
                        == CHCR_KEYCTX_CIPHER_KEY_SIZE_192)
                ret = aes_expandkey(&aes, key, keylen - 8);
        else
                ret = aes_expandkey(&aes, key, keylen);
        if (ret)
                return ret;
        aes_encrypt(&aes, iv, iv);
        for (i = 0; i < round8; i++)
                gf128mul_x8_ble((le128 *)iv, (le128 *)iv);

        for (i = 0; i < (round % 8); i++)
                gf128mul_x_ble((le128 *)iv, (le128 *)iv);

        if (!isfinal)
                aes_decrypt(&aes, iv, iv);

        memzero_explicit(&aes, sizeof(aes));
        return 0;
}
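
/*
 * The XTS tweak for block n is E_K2(IV) * x^n in GF(2^128): the second
 * half of the key encrypts the IV and the result is doubled once per
 * processed block (gf128mul_x8_ble() batches eight doublings). For an
 * intermediate update the advanced tweak is decrypted back, since the
 * hardware expects a plain IV that it will encrypt again on the next WR.
 */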

static int chcr_update_cipher_iv(struct skcipher_request *req,
                                   struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
        int subtype = get_cryptoalg_subtype(tfm);
        int ret = 0;

        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
                ctr_add_iv(iv, req->iv, (reqctx->processed /
                           AES_BLOCK_SIZE));
        else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
                *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
                        CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
                                                AES_BLOCK_SIZE) + 1);
        else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
                ret = chcr_update_tweak(req, iv, 0);
        else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
                if (reqctx->op)
                        /* Updated before sending the last WR */
                        memcpy(iv, req->iv, AES_BLOCK_SIZE);
                else
                        memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
        }

        return ret;
}

/* We need a separate function for the final IV because in RFC 3686 the
 * initial counter starts from 1 and the IV buffer is only 8 bytes, which
 * remains constant for subsequent update requests.
 */

static int chcr_final_cipher_iv(struct skcipher_request *req,
                                   struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
        int subtype = get_cryptoalg_subtype(tfm);
        int ret = 0;

        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
                ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
                                                       AES_BLOCK_SIZE));
        else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) {
                if (!reqctx->partial_req)
                        memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
                else
                        ret = chcr_update_tweak(req, iv, 1);
        } else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
                /* Already updated for decrypt */
                if (!reqctx->op)
                        memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
        }
        return ret;
}

static int chcr_handle_cipher_resp(struct skcipher_request *req,
                                   unsigned char *input, int err)
{
        struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
        struct chcr_dev *dev = c_ctx(tfm)->dev;
        struct chcr_context *ctx = c_ctx(tfm);
        struct adapter *adap = padap(ctx->dev);
        struct cipher_wr_param wrparam;
        struct sk_buff *skb;
        int bytes;

        if (err)
                goto unmap;
        if (req->cryptlen == reqctx->processed) {
                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                      req);
                err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
                goto complete;
        }

        if (!reqctx->imm) {
                bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
                                          CIP_SPACE_LEFT(ablkctx->enckey_len),
                                          reqctx->src_ofst, reqctx->dst_ofst);
                if ((bytes + reqctx->processed) >= req->cryptlen)
                        bytes = req->cryptlen - reqctx->processed;
                else
                        bytes = rounddown(bytes, 16);
        } else {
                /* CTR mode counter overflow */
                bytes = req->cryptlen - reqctx->processed;
        }
        err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
        if (err)
                goto unmap;

        if (unlikely(bytes == 0)) {
                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                      req);
                memcpy(req->iv, reqctx->init_iv, IV);
                atomic_inc(&adap->chcr_stats.fallback);
                err = chcr_cipher_fallback(ablkctx->sw_cipher, req, req->iv,
                                           reqctx->op);
                goto complete;
        }

        if (get_cryptoalg_subtype(tfm) ==
            CRYPTO_ALG_SUB_TYPE_CTR)
                bytes = adjust_ctr_overflow(reqctx->iv, bytes);
        wrparam.qid = u_ctx->lldi.rxq_ids[reqctx->rxqidx];
        wrparam.req = req;
        wrparam.bytes = bytes;
        skb = create_cipher_wr(&wrparam);
        if (IS_ERR(skb)) {
                pr_err("%s : Failed to form WR. No memory\n", __func__);
                err = PTR_ERR(skb);
                goto unmap;
        }
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
        chcr_send_wr(skb);
        reqctx->last_req_len = bytes;
        reqctx->processed += bytes;
        if (get_cryptoalg_subtype(tfm) ==
                CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
                        CRYPTO_TFM_REQ_MAY_SLEEP) {
                complete(&ctx->cbc_aes_aio_done);
        }
        return 0;
unmap:
        chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
        if (get_cryptoalg_subtype(tfm) ==
                CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
                        CRYPTO_TFM_REQ_MAY_SLEEP) {
                complete(&ctx->cbc_aes_aio_done);
        }
        chcr_dec_wrcount(dev);
        req->base.complete(&req->base, err);
        return err;
}

static int process_cipher(struct skcipher_request *req,
                                  unsigned short qid,
                                  struct sk_buff **skb,
                                  unsigned short op_type)
{
        struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        unsigned int ivsize = crypto_skcipher_ivsize(tfm);
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct adapter *adap = padap(c_ctx(tfm)->dev);
        struct cipher_wr_param wrparam;
        int bytes, err = -EINVAL;
        int subtype;

        reqctx->processed = 0;
        reqctx->partial_req = 0;
        if (!req->iv)
                goto error;
        subtype = get_cryptoalg_subtype(tfm);
        if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
            (req->cryptlen == 0) ||
            (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
                if (req->cryptlen == 0 && subtype != CRYPTO_ALG_SUB_TYPE_XTS)
                        goto fallback;
                else if (req->cryptlen % crypto_skcipher_blocksize(tfm) &&
                         subtype == CRYPTO_ALG_SUB_TYPE_XTS)
                        goto fallback;
                pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
                       ablkctx->enckey_len, req->cryptlen, ivsize);
                goto error;
        }

        err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
        if (err)
                goto error;
        if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
                                            AES_MIN_KEY_SIZE +
                                            sizeof(struct cpl_rx_phys_dsgl) +
                                        /* min dsgl size */
                                            32))) {
                /* Can be sent as immediate data */
                unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;

                dnents = sg_nents_xlen(req->dst, req->cryptlen,
                                       CHCR_DST_SG_SIZE, 0);
                phys_dsgl = get_space_for_phys_dsgl(dnents);
                kctx_len = roundup(ablkctx->enckey_len, 16);
                transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
                reqctx->imm = (transhdr_len + IV + req->cryptlen) <=
                        SGE_MAX_WR_LEN;
                bytes = IV + req->cryptlen;
        } else {
                reqctx->imm = 0;
        }

        if (!reqctx->imm) {
                bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
                                          CIP_SPACE_LEFT(ablkctx->enckey_len),
                                          0, 0);
                if ((bytes + reqctx->processed) >= req->cryptlen)
                        bytes = req->cryptlen - reqctx->processed;
                else
                        bytes = rounddown(bytes, 16);
        } else {
                bytes = req->cryptlen;
        }
        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) {
                bytes = adjust_ctr_overflow(req->iv, bytes);
        }
        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
                memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
                memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
                                CTR_RFC3686_IV_SIZE);

                /* initialize counter portion of counter block */
                *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
                        CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
                memcpy(reqctx->init_iv, reqctx->iv, IV);
        } else {
                memcpy(reqctx->iv, req->iv, IV);
                memcpy(reqctx->init_iv, req->iv, IV);
        }
        if (unlikely(bytes == 0)) {
                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                      req);
fallback:       atomic_inc(&adap->chcr_stats.fallback);
                err = chcr_cipher_fallback(ablkctx->sw_cipher, req,
                                           subtype ==
                                           CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
                                           reqctx->iv : req->iv,
                                           op_type);
                goto error;
        }
        reqctx->op = op_type;
        reqctx->srcsg = req->src;
        reqctx->dstsg = req->dst;
        reqctx->src_ofst = 0;
        reqctx->dst_ofst = 0;
        wrparam.qid = qid;
        wrparam.req = req;
        wrparam.bytes = bytes;
        *skb = create_cipher_wr(&wrparam);
        if (IS_ERR(*skb)) {
                err = PTR_ERR(*skb);
                goto unmap;
        }
        reqctx->processed = bytes;
        reqctx->last_req_len = bytes;
        reqctx->partial_req = !!(req->cryptlen - reqctx->processed);

        return 0;
unmap:
        chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
error:
        return err;
}
1366
1367 static int chcr_aes_encrypt(struct skcipher_request *req)
1368 {
1369         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1370         struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1371         struct chcr_dev *dev = c_ctx(tfm)->dev;
1372         struct sk_buff *skb = NULL;
1373         int err;
1374         struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1375         struct chcr_context *ctx = c_ctx(tfm);
1376         unsigned int cpu;
1377
1378         cpu = get_cpu();
1379         reqctx->txqidx = cpu % ctx->ntxq;
1380         reqctx->rxqidx = cpu % ctx->nrxq;
1381         put_cpu();
1382
1383         err = chcr_inc_wrcount(dev);
1384         if (err)
1385                 return -ENXIO;
1386         if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1387                                                 reqctx->txqidx) &&
1388                 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1389                         err = -ENOSPC;
1390                         goto error;
1391         }
1392
1393         err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
1394                              &skb, CHCR_ENCRYPT_OP);
1395         if (err || !skb)
		return err;
1397         skb->dev = u_ctx->lldi.ports[0];
1398         set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1399         chcr_send_wr(skb);
	/* For a synchronous CBC request (only MAY_SLEEP set in the base
	 * flags), wait until the whole request has been processed.
	 */
	if (get_cryptoalg_subtype(tfm) ==
		CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
			CRYPTO_TFM_REQ_MAY_SLEEP) {
			reqctx->partial_req = 1;
			wait_for_completion(&ctx->cbc_aes_aio_done);
	}
1406         return -EINPROGRESS;
1407 error:
1408         chcr_dec_wrcount(dev);
1409         return err;
1410 }
1411
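/*
 *	chcr_aes_decrypt - Submit an AES decrypt request to the hardware
 *	@req: skcipher request
 */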
1412 static int chcr_aes_decrypt(struct skcipher_request *req)
1413 {
1414         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1415         struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1416         struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1417         struct chcr_dev *dev = c_ctx(tfm)->dev;
1418         struct sk_buff *skb = NULL;
1419         int err;
1420         struct chcr_context *ctx = c_ctx(tfm);
1421         unsigned int cpu;
1422
1423         cpu = get_cpu();
1424         reqctx->txqidx = cpu % ctx->ntxq;
1425         reqctx->rxqidx = cpu % ctx->nrxq;
1426         put_cpu();
1427
1428         err = chcr_inc_wrcount(dev);
1429         if (err)
1430                 return -ENXIO;
1431
1432         if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1433                                                 reqctx->txqidx) &&
1434                 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))))
1435                         return -ENOSPC;
1436         err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
1437                              &skb, CHCR_DECRYPT_OP);
1438         if (err || !skb)
1439                 return err;
1440         skb->dev = u_ctx->lldi.ports[0];
1441         set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1442         chcr_send_wr(skb);
1443         return -EINPROGRESS;
1444 }
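
/*
 *	chcr_device_init - Bind a tfm context to a chcr device
 *	@ctx: chcr context
 *
 *	On first use, assigns an adapter and caches the tx/rx queue
 *	counts later used for per-CPU queue selection.
 */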
1445 static int chcr_device_init(struct chcr_context *ctx)
1446 {
1447         struct uld_ctx *u_ctx = NULL;
1448         int txq_perchan, ntxq;
1449         int err = 0, rxq_perchan;
1450
1451         if (!ctx->dev) {
1452                 u_ctx = assign_chcr_device();
1453                 if (!u_ctx) {
1454                         err = -ENXIO;
			pr_err("chcr device assignment failed\n");
1456                         goto out;
1457                 }
1458                 ctx->dev = &u_ctx->dev;
1459                 ntxq = u_ctx->lldi.ntxq;
1460                 rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
1461                 txq_perchan = ntxq / u_ctx->lldi.nchan;
1462                 ctx->ntxq = ntxq;
1463                 ctx->nrxq = u_ctx->lldi.nrxq;
1464                 ctx->rxq_perchan = rxq_perchan;
1465                 ctx->txq_perchan = txq_perchan;
1466         }
1467 out:
1468         return err;
1469 }
1470
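/*
 *	chcr_init_tfm - Allocate a software fallback cipher and size the
 *	per-request context for the hardware skcipher transform.
 */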
1471 static int chcr_init_tfm(struct crypto_skcipher *tfm)
1472 {
1473         struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1474         struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1475         struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1476
1477         ablkctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0,
1478                                 CRYPTO_ALG_NEED_FALLBACK);
1479         if (IS_ERR(ablkctx->sw_cipher)) {
1480                 pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
1481                 return PTR_ERR(ablkctx->sw_cipher);
1482         }
1483         init_completion(&ctx->cbc_aes_aio_done);
1484         crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
1485                                          crypto_skcipher_reqsize(ablkctx->sw_cipher));
1486
1487         return chcr_device_init(ctx);
1488 }
1489
1490 static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
1491 {
1492         struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1493         struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1494         struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1495
	/* RFC3686 initialises the IV counter value to 1, so plain
	 * rfc3686(ctr(aes)) cannot be used as the fallback in
	 * chcr_handle_cipher_resp; use bare ctr(aes) instead.
	 */
1499         ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
1500                                 CRYPTO_ALG_NEED_FALLBACK);
1501         if (IS_ERR(ablkctx->sw_cipher)) {
1502                 pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
1503                 return PTR_ERR(ablkctx->sw_cipher);
1504         }
1505         crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
1506                                     crypto_skcipher_reqsize(ablkctx->sw_cipher));
1507         return chcr_device_init(ctx);
1508 }
1509
static void chcr_exit_tfm(struct crypto_skcipher *tfm)
1512 {
1513         struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1514         struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1515
1516         crypto_free_skcipher(ablkctx->sw_cipher);
1517 }
1518
1519 static int get_alg_config(struct algo_param *params,
1520                           unsigned int auth_size)
1521 {
1522         switch (auth_size) {
1523         case SHA1_DIGEST_SIZE:
1524                 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
1525                 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
1526                 params->result_size = SHA1_DIGEST_SIZE;
1527                 break;
1528         case SHA224_DIGEST_SIZE:
1529                 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1530                 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
1531                 params->result_size = SHA256_DIGEST_SIZE;
1532                 break;
1533         case SHA256_DIGEST_SIZE:
1534                 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1535                 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
1536                 params->result_size = SHA256_DIGEST_SIZE;
1537                 break;
1538         case SHA384_DIGEST_SIZE:
1539                 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1540                 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
1541                 params->result_size = SHA512_DIGEST_SIZE;
1542                 break;
1543         case SHA512_DIGEST_SIZE:
1544                 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1545                 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
1546                 params->result_size = SHA512_DIGEST_SIZE;
1547                 break;
1548         default:
		pr_err("unsupported digest size\n");
1550                 return -EINVAL;
1551         }
1552         return 0;
1553 }
1554
static inline void chcr_free_shash(struct crypto_shash *base_hash)
{
	crypto_free_shash(base_hash);
}
1559
/**
 *	create_hash_wr - Create hash work request
 *	@req: ahash request
 *	@param: hash work request parameters
 */
1564 static struct sk_buff *create_hash_wr(struct ahash_request *req,
1565                                       struct hash_wr_param *param)
1566 {
1567         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1568         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1569         struct chcr_context *ctx = h_ctx(tfm);
1570         struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1571         struct sk_buff *skb = NULL;
1572         struct uld_ctx *u_ctx = ULD_CTX(ctx);
1573         struct chcr_wr *chcr_req;
1574         struct ulptx_sgl *ulptx;
1575         unsigned int nents = 0, transhdr_len;
1576         unsigned int temp = 0;
1577         gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1578                 GFP_ATOMIC;
1579         struct adapter *adap = padap(h_ctx(tfm)->dev);
1580         int error = 0;
1581         unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;
1582
1583         transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
1584         req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
1585                                 param->sg_len) <= SGE_MAX_WR_LEN;
1586         nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
1587                       CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
1588         nents += param->bfr_len ? 1 : 0;
1589         transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
1590                                 param->sg_len, 16) : (sgl_len(nents) * 8);
1591         transhdr_len = roundup(transhdr_len, 16);
1592
1593         skb = alloc_skb(transhdr_len, flags);
1594         if (!skb)
1595                 return ERR_PTR(-ENOMEM);
1596         chcr_req = __skb_put_zero(skb, transhdr_len);
1597
1598         chcr_req->sec_cpl.op_ivinsrtofst =
1599                 FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 0);
1600
1601         chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
1602
1603         chcr_req->sec_cpl.aadstart_cipherstop_hi =
1604                 FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
1605         chcr_req->sec_cpl.cipherstop_lo_authinsert =
1606                 FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
1607         chcr_req->sec_cpl.seqno_numivs =
1608                 FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
1609                                          param->opad_needed, 0);
1610
1611         chcr_req->sec_cpl.ivgen_hdrlen =
1612                 FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
1613
1614         memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
1615                param->alg_prm.result_size);
1616
1617         if (param->opad_needed)
1618                 memcpy(chcr_req->key_ctx.key +
1619                        ((param->alg_prm.result_size <= 32) ? 32 :
1620                         CHCR_HASH_MAX_DIGEST_SIZE),
1621                        hmacctx->opad, param->alg_prm.result_size);
1622
1623         chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
1624                                             param->alg_prm.mk_size, 0,
1625                                             param->opad_needed,
1626                                             ((param->kctx_len +
1627                                              sizeof(chcr_req->key_ctx)) >> 4));
1628         chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
1629         ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
1630                                      DUMMY_BYTES);
1631         if (param->bfr_len != 0) {
1632                 req_ctx->hctx_wr.dma_addr =
1633                         dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
1634                                        param->bfr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
				      req_ctx->hctx_wr.dma_addr)) {
1637                         error = -ENOMEM;
1638                         goto err;
1639                 }
1640                 req_ctx->hctx_wr.dma_len = param->bfr_len;
1641         } else {
1642                 req_ctx->hctx_wr.dma_addr = 0;
1643         }
1644         chcr_add_hash_src_ent(req, ulptx, param);
	/* Request up to the max WR size */
1646         temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
1647                                 (param->sg_len + param->bfr_len) : 0);
1648         atomic_inc(&adap->chcr_stats.digest_rqst);
1649         create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
1650                     param->hash_size, transhdr_len,
		    temp, 0);
1652         req_ctx->hctx_wr.skb = skb;
1653         return skb;
1654 err:
1655         kfree_skb(skb);
	return ERR_PTR(error);
1657 }
1658
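/*
 *	chcr_ahash_update - Hash another chunk of data
 *	@req: ahash request
 *
 *	Bytes that do not fill a whole block are buffered in reqbfr and
 *	carried over to the next call; only block-aligned data is handed
 *	to the hardware.
 */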
1659 static int chcr_ahash_update(struct ahash_request *req)
1660 {
1661         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1662         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1663         struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1664         struct chcr_context *ctx = h_ctx(rtfm);
1665         struct chcr_dev *dev = h_ctx(rtfm)->dev;
1666         struct sk_buff *skb;
1667         u8 remainder = 0, bs;
1668         unsigned int nbytes = req->nbytes;
1669         struct hash_wr_param params;
1670         int error;
1671         unsigned int cpu;
1672
1673         cpu = get_cpu();
1674         req_ctx->txqidx = cpu % ctx->ntxq;
1675         req_ctx->rxqidx = cpu % ctx->nrxq;
1676         put_cpu();
1677
1678         bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1679
1680         if (nbytes + req_ctx->reqlen >= bs) {
1681                 remainder = (nbytes + req_ctx->reqlen) % bs;
1682                 nbytes = nbytes + req_ctx->reqlen - remainder;
1683         } else {
1684                 sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
1685                                    + req_ctx->reqlen, nbytes, 0);
1686                 req_ctx->reqlen += nbytes;
1687                 return 0;
1688         }
1689         error = chcr_inc_wrcount(dev);
1690         if (error)
1691                 return -ENXIO;
	/* In the detached state the lldi and padap structures may already
	 * have been freed. Holding an inflight WR count on the dev
	 * guarantees that lldi and padap stay valid.
	 */
1695         if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1696                                                 req_ctx->txqidx) &&
1697                 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1698                         error = -ENOSPC;
1699                         goto err;
1700         }
1701
1702         chcr_init_hctx_per_wr(req_ctx);
1703         error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1704         if (error) {
1705                 error = -ENOMEM;
1706                 goto err;
1707         }
1708         get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1709         params.kctx_len = roundup(params.alg_prm.result_size, 16);
1710         params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1711                                      HASH_SPACE_LEFT(params.kctx_len), 0);
1712         if (params.sg_len > req->nbytes)
1713                 params.sg_len = req->nbytes;
1714         params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
1715                         req_ctx->reqlen;
1716         params.opad_needed = 0;
1717         params.more = 1;
1718         params.last = 0;
1719         params.bfr_len = req_ctx->reqlen;
1720         params.scmd1 = 0;
1721         req_ctx->hctx_wr.srcsg = req->src;
1722
1723         params.hash_size = params.alg_prm.result_size;
1724         req_ctx->data_len += params.sg_len + params.bfr_len;
1725         skb = create_hash_wr(req, &params);
1726         if (IS_ERR(skb)) {
1727                 error = PTR_ERR(skb);
1728                 goto unmap;
1729         }
1730
1731         req_ctx->hctx_wr.processed += params.sg_len;
1732         if (remainder) {
1733                 /* Swap buffers */
1734                 swap(req_ctx->reqbfr, req_ctx->skbfr);
1735                 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
1736                                    req_ctx->reqbfr, remainder, req->nbytes -
1737                                    remainder);
1738         }
1739         req_ctx->reqlen = remainder;
1740         skb->dev = u_ctx->lldi.ports[0];
1741         set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1742         chcr_send_wr(skb);
1743         return -EINPROGRESS;
1744 unmap:
1745         chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1746 err:
1747         chcr_dec_wrcount(dev);
1748         return error;
1749 }
1750
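/*
 * Build a Merkle-Damgard padding block in software: a 0x80 byte followed
 * by zeros, with the total message length in bits (scmd1 << 3) stored
 * big-endian in the final 8 bytes. For example, for bs == 64
 * (SHA-1/224/256) after 64 bytes of data:
 *
 *	bfr[0]      = 0x80
 *	bfr[1..55]  = 0x00
 *	bfr[56..63] = cpu_to_be64(64 << 3)	i.e. 512 bits
 *
 * SHA-384/512 use bs == 128 with the length at offset 120 (the upper
 * half of their 128 bit length field stays zero).
 */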
1751 static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
1752 {
1753         memset(bfr_ptr, 0, bs);
1754         *bfr_ptr = 0x80;
1755         if (bs == 64)
		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
	else
		*(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
1759 }
1760
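/*
 *	chcr_ahash_final - Produce the final digest
 *	@req: ahash request
 *
 *	If nothing is buffered, the MD-style padding block is built in
 *	software and pushed as one more intermediate block, which yields
 *	the final digest; otherwise the buffered bytes go out as the
 *	last (last = 1) work request.
 */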
1761 static int chcr_ahash_final(struct ahash_request *req)
1762 {
1763         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1764         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1765         struct chcr_dev *dev = h_ctx(rtfm)->dev;
1766         struct hash_wr_param params;
1767         struct sk_buff *skb;
1768         struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1769         struct chcr_context *ctx = h_ctx(rtfm);
1770         u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1771         int error;
1772         unsigned int cpu;
1773
1774         cpu = get_cpu();
1775         req_ctx->txqidx = cpu % ctx->ntxq;
1776         req_ctx->rxqidx = cpu % ctx->nrxq;
1777         put_cpu();
1778
1779         error = chcr_inc_wrcount(dev);
1780         if (error)
1781                 return -ENXIO;
1782
1783         chcr_init_hctx_per_wr(req_ctx);
1784         if (is_hmac(crypto_ahash_tfm(rtfm)))
1785                 params.opad_needed = 1;
1786         else
1787                 params.opad_needed = 0;
1788         params.sg_len = 0;
1789         req_ctx->hctx_wr.isfinal = 1;
1790         get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1791         params.kctx_len = roundup(params.alg_prm.result_size, 16);
1792         if (is_hmac(crypto_ahash_tfm(rtfm))) {
1793                 params.opad_needed = 1;
1794                 params.kctx_len *= 2;
1795         } else {
1796                 params.opad_needed = 0;
1797         }
1798
1799         req_ctx->hctx_wr.result = 1;
1800         params.bfr_len = req_ctx->reqlen;
1801         req_ctx->data_len += params.bfr_len + params.sg_len;
1802         req_ctx->hctx_wr.srcsg = req->src;
1803         if (req_ctx->reqlen == 0) {
1804                 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1805                 params.last = 0;
1806                 params.more = 1;
1807                 params.scmd1 = 0;
		params.bfr_len = bs;
	} else {
1811                 params.scmd1 = req_ctx->data_len;
1812                 params.last = 1;
1813                 params.more = 0;
1814         }
1815         params.hash_size = crypto_ahash_digestsize(rtfm);
1816         skb = create_hash_wr(req, &params);
1817         if (IS_ERR(skb)) {
1818                 error = PTR_ERR(skb);
1819                 goto err;
1820         }
1821         req_ctx->reqlen = 0;
1822         skb->dev = u_ctx->lldi.ports[0];
1823         set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1824         chcr_send_wr(skb);
1825         return -EINPROGRESS;
1826 err:
1827         chcr_dec_wrcount(dev);
1828         return error;
1829 }
1830
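/*
 *	chcr_ahash_finup - Hash the remaining data and produce the digest
 *	@req: ahash request
 *
 *	If the source does not fit in one WR, an intermediate (more = 1)
 *	request is sent and chcr_ahash_continue() picks up the rest from
 *	the completion path.
 */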
1831 static int chcr_ahash_finup(struct ahash_request *req)
1832 {
1833         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1834         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1835         struct chcr_dev *dev = h_ctx(rtfm)->dev;
1836         struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1837         struct chcr_context *ctx = h_ctx(rtfm);
1838         struct sk_buff *skb;
1839         struct hash_wr_param params;
1840         u8  bs;
1841         int error;
1842         unsigned int cpu;
1843
1844         cpu = get_cpu();
1845         req_ctx->txqidx = cpu % ctx->ntxq;
1846         req_ctx->rxqidx = cpu % ctx->nrxq;
1847         put_cpu();
1848
1849         bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1850         error = chcr_inc_wrcount(dev);
1851         if (error)
1852                 return -ENXIO;
1853
1854         if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1855                                                 req_ctx->txqidx) &&
1856                 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1857                         error = -ENOSPC;
1858                         goto err;
1859         }
1860         chcr_init_hctx_per_wr(req_ctx);
1861         error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1862         if (error) {
1863                 error = -ENOMEM;
1864                 goto err;
1865         }
1866
1867         get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1868         params.kctx_len = roundup(params.alg_prm.result_size, 16);
1869         if (is_hmac(crypto_ahash_tfm(rtfm))) {
1870                 params.kctx_len *= 2;
1871                 params.opad_needed = 1;
1872         } else {
1873                 params.opad_needed = 0;
1874         }
1875
1876         params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1877                                     HASH_SPACE_LEFT(params.kctx_len), 0);
1878         if (params.sg_len < req->nbytes) {
1879                 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1880                         params.kctx_len /= 2;
1881                         params.opad_needed = 0;
1882                 }
1883                 params.last = 0;
1884                 params.more = 1;
1885                 params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
1886                                         - req_ctx->reqlen;
1887                 params.hash_size = params.alg_prm.result_size;
1888                 params.scmd1 = 0;
1889         } else {
1890                 params.last = 1;
1891                 params.more = 0;
1892                 params.sg_len = req->nbytes;
1893                 params.hash_size = crypto_ahash_digestsize(rtfm);
1894                 params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
1895                                 params.sg_len;
1896         }
1897         params.bfr_len = req_ctx->reqlen;
1898         req_ctx->data_len += params.bfr_len + params.sg_len;
1899         req_ctx->hctx_wr.result = 1;
1900         req_ctx->hctx_wr.srcsg = req->src;
1901         if ((req_ctx->reqlen + req->nbytes) == 0) {
1902                 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1903                 params.last = 0;
1904                 params.more = 1;
1905                 params.scmd1 = 0;
1906                 params.bfr_len = bs;
1907         }
1908         skb = create_hash_wr(req, &params);
1909         if (IS_ERR(skb)) {
1910                 error = PTR_ERR(skb);
1911                 goto unmap;
1912         }
1913         req_ctx->reqlen = 0;
1914         req_ctx->hctx_wr.processed += params.sg_len;
1915         skb->dev = u_ctx->lldi.ports[0];
1916         set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1917         chcr_send_wr(skb);
1918         return -EINPROGRESS;
1919 unmap:
1920         chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1921 err:
1922         chcr_dec_wrcount(dev);
1923         return error;
1924 }
1925
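/*
 *	chcr_ahash_digest - Perform init + update + final in one call
 *	@req: ahash request
 */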
1926 static int chcr_ahash_digest(struct ahash_request *req)
1927 {
1928         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1929         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1930         struct chcr_dev *dev = h_ctx(rtfm)->dev;
1931         struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1932         struct chcr_context *ctx = h_ctx(rtfm);
1933         struct sk_buff *skb;
1934         struct hash_wr_param params;
1935         u8  bs;
1936         int error;
1937         unsigned int cpu;
1938
1939         cpu = get_cpu();
1940         req_ctx->txqidx = cpu % ctx->ntxq;
1941         req_ctx->rxqidx = cpu % ctx->nrxq;
1942         put_cpu();
1943
1944         rtfm->init(req);
1945         bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1946         error = chcr_inc_wrcount(dev);
1947         if (error)
1948                 return -ENXIO;
1949
1950         if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1951                                                 req_ctx->txqidx) &&
1952                 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1953                         error = -ENOSPC;
1954                         goto err;
1955         }
1956
1957         chcr_init_hctx_per_wr(req_ctx);
1958         error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1959         if (error) {
1960                 error = -ENOMEM;
1961                 goto err;
1962         }
1963
1964         get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1965         params.kctx_len = roundup(params.alg_prm.result_size, 16);
1966         if (is_hmac(crypto_ahash_tfm(rtfm))) {
1967                 params.kctx_len *= 2;
1968                 params.opad_needed = 1;
1969         } else {
1970                 params.opad_needed = 0;
1971         }
1972         params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1973                                 HASH_SPACE_LEFT(params.kctx_len), 0);
1974         if (params.sg_len < req->nbytes) {
1975                 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1976                         params.kctx_len /= 2;
1977                         params.opad_needed = 0;
1978                 }
1979                 params.last = 0;
1980                 params.more = 1;
1981                 params.scmd1 = 0;
1982                 params.sg_len = rounddown(params.sg_len, bs);
1983                 params.hash_size = params.alg_prm.result_size;
1984         } else {
1985                 params.sg_len = req->nbytes;
1986                 params.hash_size = crypto_ahash_digestsize(rtfm);
1987                 params.last = 1;
1988                 params.more = 0;
		params.scmd1 = req->nbytes + req_ctx->data_len;
	}
1992         params.bfr_len = 0;
1993         req_ctx->hctx_wr.result = 1;
1994         req_ctx->hctx_wr.srcsg = req->src;
1995         req_ctx->data_len += params.bfr_len + params.sg_len;
1996
1997         if (req->nbytes == 0) {
1998                 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1999                 params.more = 1;
2000                 params.bfr_len = bs;
2001         }
2002
2003         skb = create_hash_wr(req, &params);
2004         if (IS_ERR(skb)) {
2005                 error = PTR_ERR(skb);
2006                 goto unmap;
2007         }
2008         req_ctx->hctx_wr.processed += params.sg_len;
2009         skb->dev = u_ctx->lldi.ports[0];
2010         set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
2011         chcr_send_wr(skb);
2012         return -EINPROGRESS;
2013 unmap:
2014         chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2015 err:
2016         chcr_dec_wrcount(dev);
2017         return error;
2018 }
2019
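/*
 *	chcr_ahash_continue - Send the next work request of a hash that
 *	spans multiple WRs. Called from the completion path with the
 *	partial hash already copied back into the request context.
 */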
2020 static int chcr_ahash_continue(struct ahash_request *req)
2021 {
2022         struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2023         struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2024         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
2025         struct chcr_context *ctx = h_ctx(rtfm);
2026         struct uld_ctx *u_ctx = ULD_CTX(ctx);
2027         struct sk_buff *skb;
2028         struct hash_wr_param params;
2029         u8  bs;
2030         int error;
2031         unsigned int cpu;
2032
2033         cpu = get_cpu();
2034         reqctx->txqidx = cpu % ctx->ntxq;
2035         reqctx->rxqidx = cpu % ctx->nrxq;
2036         put_cpu();
2037
2038         bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2039         get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
2040         params.kctx_len = roundup(params.alg_prm.result_size, 16);
2041         if (is_hmac(crypto_ahash_tfm(rtfm))) {
2042                 params.kctx_len *= 2;
2043                 params.opad_needed = 1;
2044         } else {
2045                 params.opad_needed = 0;
2046         }
2047         params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
2048                                             HASH_SPACE_LEFT(params.kctx_len),
2049                                             hctx_wr->src_ofst);
2050         if ((params.sg_len + hctx_wr->processed) > req->nbytes)
2051                 params.sg_len = req->nbytes - hctx_wr->processed;
2052         if (!hctx_wr->result ||
2053             ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
2054                 if (is_hmac(crypto_ahash_tfm(rtfm))) {
2055                         params.kctx_len /= 2;
2056                         params.opad_needed = 0;
2057                 }
2058                 params.last = 0;
2059                 params.more = 1;
2060                 params.sg_len = rounddown(params.sg_len, bs);
2061                 params.hash_size = params.alg_prm.result_size;
2062                 params.scmd1 = 0;
2063         } else {
2064                 params.last = 1;
2065                 params.more = 0;
2066                 params.hash_size = crypto_ahash_digestsize(rtfm);
2067                 params.scmd1 = reqctx->data_len + params.sg_len;
2068         }
2069         params.bfr_len = 0;
2070         reqctx->data_len += params.sg_len;
2071         skb = create_hash_wr(req, &params);
2072         if (IS_ERR(skb)) {
2073                 error = PTR_ERR(skb);
2074                 goto err;
2075         }
2076         hctx_wr->processed += params.sg_len;
2077         skb->dev = u_ctx->lldi.ports[0];
2078         set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
2079         chcr_send_wr(skb);
2080         return 0;
2081 err:
2082         return error;
2083 }
2084
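/*
 *	chcr_handle_ahash_resp - Completion handler for hash work requests
 *	@req: ahash request
 *	@input: response payload; the digest follows the cpl_fw6_pld header
 *	@err: completion status
 *
 *	Copies out the final digest or the partial hash and, for multi-WR
 *	requests, kicks off the next chunk via chcr_ahash_continue().
 */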
2085 static inline void chcr_handle_ahash_resp(struct ahash_request *req,
2086                                           unsigned char *input,
2087                                           int err)
2088 {
2089         struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2090         struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2091         int digestsize, updated_digestsize;
2092         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2093         struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
2094         struct chcr_dev *dev = h_ctx(tfm)->dev;
2095
2096         if (input == NULL)
2097                 goto out;
2098         digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
2099         updated_digestsize = digestsize;
2100         if (digestsize == SHA224_DIGEST_SIZE)
2101                 updated_digestsize = SHA256_DIGEST_SIZE;
2102         else if (digestsize == SHA384_DIGEST_SIZE)
2103                 updated_digestsize = SHA512_DIGEST_SIZE;
2104
2105         if (hctx_wr->dma_addr) {
2106                 dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
2107                                  hctx_wr->dma_len, DMA_TO_DEVICE);
2108                 hctx_wr->dma_addr = 0;
2109         }
2110         if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
2111                                  req->nbytes)) {
2112                 if (hctx_wr->result == 1) {
2113                         hctx_wr->result = 0;
2114                         memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
2115                                digestsize);
2116                 } else {
2117                         memcpy(reqctx->partial_hash,
2118                                input + sizeof(struct cpl_fw6_pld),
			       updated_digestsize);
		}
2122                 goto unmap;
2123         }
2124         memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
2125                updated_digestsize);
2126
2127         err = chcr_ahash_continue(req);
2128         if (err)
2129                 goto unmap;
2130         return;
2131 unmap:
2132         if (hctx_wr->is_sg_map)
		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);

out:
2137         chcr_dec_wrcount(dev);
2138         req->base.complete(&req->base, err);
2139 }
2140
/*
 *	chcr_handle_resp - Unmap the DMA buffers associated with the request
 *	@req: crypto request
 *	@input: hardware response payload
 *	@err: completion status passed up from the hardware
 */
2145 int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
2146                          int err)
2147 {
2148         struct crypto_tfm *tfm = req->tfm;
2149         struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2150         struct adapter *adap = padap(ctx->dev);
2151
2152         switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
2153         case CRYPTO_ALG_TYPE_AEAD:
2154                 err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
2155                 break;
2156
	case CRYPTO_ALG_TYPE_SKCIPHER:
		chcr_handle_cipher_resp(skcipher_request_cast(req),
					input, err);
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
	}
2164         atomic_inc(&adap->chcr_stats.complete);
2165         return err;
2166 }
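
/*
 *	chcr_ahash_export/chcr_ahash_import - Save and restore the hash
 *	state (buffered data, running length and partial digest) so a
 *	request can be suspended and resumed later.
 */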
2167 static int chcr_ahash_export(struct ahash_request *areq, void *out)
2168 {
2169         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2170         struct chcr_ahash_req_ctx *state = out;
2171
2172         state->reqlen = req_ctx->reqlen;
2173         state->data_len = req_ctx->data_len;
2174         memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
2175         memcpy(state->partial_hash, req_ctx->partial_hash,
2176                CHCR_HASH_MAX_DIGEST_SIZE);
2177         chcr_init_hctx_per_wr(state);
2178         return 0;
2179 }
2180
2181 static int chcr_ahash_import(struct ahash_request *areq, const void *in)
2182 {
2183         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2184         struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
2185
2186         req_ctx->reqlen = state->reqlen;
2187         req_ctx->data_len = state->data_len;
2188         req_ctx->reqbfr = req_ctx->bfr1;
2189         req_ctx->skbfr = req_ctx->bfr2;
2190         memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
2191         memcpy(req_ctx->partial_hash, state->partial_hash,
2192                CHCR_HASH_MAX_DIGEST_SIZE);
2193         chcr_init_hctx_per_wr(req_ctx);
2194         return 0;
2195 }
2196
2197 static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2198                              unsigned int keylen)
2199 {
2200         struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
2201         unsigned int digestsize = crypto_ahash_digestsize(tfm);
2202         unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int i, updated_digestsize;
	int err = 0;
2204
2205         SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
2206
	/* Use the key to calculate the ipad and opad. The ipad is sent with
	 * the first request's data and the opad is sent with the final hash
	 * result; they are kept in hmacctx->ipad and hmacctx->opad.
	 */
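	/*
	 * This follows the RFC 2104 HMAC construction: with the key
	 * zero-padded to the block size bs,
	 *
	 *	ipad[i] = key[i] ^ 0x36
	 *	opad[i] = key[i] ^ 0x5c
	 *
	 * where IPAD_DATA/OPAD_DATA hold those pad bytes replicated
	 * across a 32 bit word so the loop below can XOR word at a time.
	 */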
2211         shash->tfm = hmacctx->base_hash;
2212         if (keylen > bs) {
2213                 err = crypto_shash_digest(shash, key, keylen,
2214                                           hmacctx->ipad);
2215                 if (err)
2216                         goto out;
2217                 keylen = digestsize;
2218         } else {
2219                 memcpy(hmacctx->ipad, key, keylen);
2220         }
2221         memset(hmacctx->ipad + keylen, 0, bs - keylen);
2222         memcpy(hmacctx->opad, hmacctx->ipad, bs);
2223
2224         for (i = 0; i < bs / sizeof(int); i++) {
2225                 *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
2226                 *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
2227         }
2228
2229         updated_digestsize = digestsize;
2230         if (digestsize == SHA224_DIGEST_SIZE)
2231                 updated_digestsize = SHA256_DIGEST_SIZE;
2232         else if (digestsize == SHA384_DIGEST_SIZE)
2233                 updated_digestsize = SHA512_DIGEST_SIZE;
2234         err = chcr_compute_partial_hash(shash, hmacctx->ipad,
2235                                         hmacctx->ipad, digestsize);
2236         if (err)
2237                 goto out;
2238         chcr_change_order(hmacctx->ipad, updated_digestsize);
2239
2240         err = chcr_compute_partial_hash(shash, hmacctx->opad,
2241                                         hmacctx->opad, digestsize);
2242         if (err)
2243                 goto out;
2244         chcr_change_order(hmacctx->opad, updated_digestsize);
2245 out:
2246         return err;
2247 }
2248
2249 static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
2250                                unsigned int key_len)
2251 {
2252         struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
2253         unsigned short context_size = 0;
2254         int err;
2255
2256         err = chcr_cipher_fallback_setkey(cipher, key, key_len);
2257         if (err)
2258                 goto badkey_err;
2259
2260         memcpy(ablkctx->key, key, key_len);
2261         ablkctx->enckey_len = key_len;
2262         get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
2263         context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
	/* Both XTS keys must be aligned to a 16 byte boundary by padding
	 * with zeros, so a 24 byte (192 bit) key gets 8 bytes of padding.
	 */
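	/*
	 * Resulting key layout for a 48 byte (2 x 192 bit) XTS key:
	 *
	 *	key[ 0..23]	first key
	 *	key[24..31]	zero padding
	 *	key[32..55]	second key, moved up by 8 bytes
	 *	key[56..63]	zero padding
	 *
	 * giving an effective enckey_len of 64 bytes.
	 */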
2267         if (key_len == 48) {
2268                 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len
2269                                 + 16) >> 4;
2270                 memmove(ablkctx->key + 32, ablkctx->key + 24, 24);
2271                 memset(ablkctx->key + 24, 0, 8);
2272                 memset(ablkctx->key + 56, 0, 8);
2273                 ablkctx->enckey_len = 64;
2274                 ablkctx->key_ctx_hdr =
2275                         FILL_KEY_CTX_HDR(CHCR_KEYCTX_CIPHER_KEY_SIZE_192,
2276                                          CHCR_KEYCTX_NO_KEY, 1,
2277                                          0, context_size);
2278         } else {
2279                 ablkctx->key_ctx_hdr =
2280                 FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
2281                                  CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
2282                                  CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
2283                                  CHCR_KEYCTX_NO_KEY, 1,
2284                                  0, context_size);
2285         }
2286         ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
2287         return 0;
2288 badkey_err:
	ablkctx->enckey_len = 0;
	return err;
2292 }
2293
2294 static int chcr_sha_init(struct ahash_request *areq)
2295 {
2296         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2297         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2298         int digestsize =  crypto_ahash_digestsize(tfm);
2299
2300         req_ctx->data_len = 0;
2301         req_ctx->reqlen = 0;
2302         req_ctx->reqbfr = req_ctx->bfr1;
2303         req_ctx->skbfr = req_ctx->bfr2;
2304         copy_hash_init_values(req_ctx->partial_hash, digestsize);
2305
2306         return 0;
2307 }
2308
2309 static int chcr_sha_cra_init(struct crypto_tfm *tfm)
2310 {
2311         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2312                                  sizeof(struct chcr_ahash_req_ctx));
2313         return chcr_device_init(crypto_tfm_ctx(tfm));
2314 }
2315
2316 static int chcr_hmac_init(struct ahash_request *areq)
2317 {
2318         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2319         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
2320         struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
2321         unsigned int digestsize = crypto_ahash_digestsize(rtfm);
2322         unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2323
2324         chcr_sha_init(areq);
2325         req_ctx->data_len = bs;
2326         if (is_hmac(crypto_ahash_tfm(rtfm))) {
2327                 if (digestsize == SHA224_DIGEST_SIZE)
2328                         memcpy(req_ctx->partial_hash, hmacctx->ipad,
2329                                SHA256_DIGEST_SIZE);
2330                 else if (digestsize == SHA384_DIGEST_SIZE)
2331                         memcpy(req_ctx->partial_hash, hmacctx->ipad,
2332                                SHA512_DIGEST_SIZE);
2333                 else
2334                         memcpy(req_ctx->partial_hash, hmacctx->ipad,
2335                                digestsize);
2336         }
2337         return 0;
2338 }
2339
2340 static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
2341 {
2342         struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2343         struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2344         unsigned int digestsize =
2345                 crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
2346
2347         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2348                                  sizeof(struct chcr_ahash_req_ctx));
2349         hmacctx->base_hash = chcr_alloc_shash(digestsize);
2350         if (IS_ERR(hmacctx->base_hash))
2351                 return PTR_ERR(hmacctx->base_hash);
2352         return chcr_device_init(crypto_tfm_ctx(tfm));
2353 }
2354
2355 static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
2356 {
2357         struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2358         struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2359
2360         if (hmacctx->base_hash) {
2361                 chcr_free_shash(hmacctx->base_hash);
2362                 hmacctx->base_hash = NULL;
2363         }
2364 }
2365
2366 inline void chcr_aead_common_exit(struct aead_request *req)
2367 {
2368         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2369         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2370         struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
2371
2372         chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
2373 }
2374
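/*
 *	chcr_aead_common_init - Validate the request and DMA-map the
 *	buffers shared by all AEAD work request builders.
 */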
2375 static int chcr_aead_common_init(struct aead_request *req)
2376 {
2377         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2378         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2379         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2380         unsigned int authsize = crypto_aead_authsize(tfm);
2381         int error = -EINVAL;
2382
2383         /* validate key size */
2384         if (aeadctx->enckey_len == 0)
2385                 goto err;
2386         if (reqctx->op && req->cryptlen < authsize)
2387                 goto err;
2388         if (reqctx->b0_len)
2389                 reqctx->scratch_pad = reqctx->iv + IV;
2390         else
2391                 reqctx->scratch_pad = NULL;
2392
2393         error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2394                                   reqctx->op);
2395         if (error) {
2396                 error = -ENOMEM;
2397                 goto err;
2398         }
2399
2400         return 0;
2401 err:
2402         return error;
2403 }
2404
2405 static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
2406                                    int aadmax, int wrlen,
2407                                    unsigned short op_type)
2408 {
2409         unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2410
2411         if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2412             dst_nents > MAX_DSGL_ENT ||
2413             (req->assoclen > aadmax) ||
2414             (wrlen > SGE_MAX_WR_LEN))
2415                 return 1;
2416         return 0;
2417 }
2418
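/*
 *	chcr_aead_fallback - Hand the request over to the software AEAD
 *	when a hardware limit (AAD size, WR length, DSGL entries or a
 *	zero-length payload) rules out offload.
 */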
2419 static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2420 {
2421         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2422         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2423         struct aead_request *subreq = aead_request_ctx(req);
2424
2425         aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2426         aead_request_set_callback(subreq, req->base.flags,
2427                                   req->base.complete, req->base.data);
2428         aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2429                                  req->iv);
2430         aead_request_set_ad(subreq, req->assoclen);
2431         return op_type ? crypto_aead_decrypt(subreq) :
2432                 crypto_aead_encrypt(subreq);
2433 }
2434
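/*
 *	create_authenc_wr - Build the work request for authenc (cipher
 *	plus hash) AEAD modes; the wire layout is AAD, IV, payload with
 *	the IV authenticated together with the AAD.
 */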
2435 static struct sk_buff *create_authenc_wr(struct aead_request *req,
2436                                          unsigned short qid,
2437                                          int size)
2438 {
2439         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2440         struct chcr_context *ctx = a_ctx(tfm);
2441         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2442         struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2443         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2444         struct sk_buff *skb = NULL;
2445         struct chcr_wr *chcr_req;
2446         struct cpl_rx_phys_dsgl *phys_cpl;
2447         struct ulptx_sgl *ulptx;
2448         unsigned int transhdr_len;
2449         unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
2450         unsigned int   kctx_len = 0, dnents, snents;
2451         unsigned int  authsize = crypto_aead_authsize(tfm);
2452         int error = -EINVAL;
2453         u8 *ivptr;
2454         int null = 0;
2455         gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2456                 GFP_ATOMIC;
2457         struct adapter *adap = padap(ctx->dev);
2458         unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2459
2460         if (req->cryptlen == 0)
2461                 return NULL;
2462
2463         reqctx->b0_len = 0;
2464         error = chcr_aead_common_init(req);
2465         if (error)
2466                 return ERR_PTR(error);
2467
2468         if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
2469                 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2470                 null = 1;
2471         }
2472         dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
2473                 (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
	dnents += MIN_AUTH_SG;	/* For IV */
2475         snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2476                                CHCR_SRC_SG_SIZE, 0);
2477         dst_size = get_space_for_phys_dsgl(dnents);
2478         kctx_len = (KEY_CONTEXT_CTX_LEN_G(ntohl(aeadctx->key_ctx_hdr)) << 4)
2479                 - sizeof(chcr_req->key_ctx);
2480         transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2481         reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
2482                         SGE_MAX_WR_LEN;
2483         temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
2484                         : (sgl_len(snents) * 8);
2485         transhdr_len += temp;
2486         transhdr_len = roundup(transhdr_len, 16);
2487
2488         if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2489                                     transhdr_len, reqctx->op)) {
2490                 atomic_inc(&adap->chcr_stats.fallback);
2491                 chcr_aead_common_exit(req);
2492                 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2493         }
2494         skb = alloc_skb(transhdr_len, flags);
2495         if (!skb) {
2496                 error = -ENOMEM;
2497                 goto err;
2498         }
2499
2500         chcr_req = __skb_put_zero(skb, transhdr_len);
2501
2502         temp  = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
2503
	/*
	 * Input order is AAD, IV and Payload, where the IV should be
	 * included as part of the authdata. All other fields should be
	 * filled according to the hardware spec.
	 */
2509         chcr_req->sec_cpl.op_ivinsrtofst =
2510                                 FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
2511         chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
2512         chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2513                                         null ? 0 : 1 + IV,
2514                                         null ? 0 : IV + req->assoclen,
2515                                         req->assoclen + IV + 1,
2516                                         (temp & 0x1F0) >> 4);
2517         chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2518                                         temp & 0xF,
2519                                         null ? 0 : req->assoclen + IV + 1,
2520                                         temp, temp);
2521         if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
2522             subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
2523                 temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
2524         else
2525                 temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
2526         chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
2527                                         (reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
2528                                         temp,
2529                                         actx->auth_mode, aeadctx->hmac_ctrl,
2530                                         IV >> 1);
2531         chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2532                                          0, 0, dst_size);
2533
2534         chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2535         if (reqctx->op == CHCR_ENCRYPT_OP ||
2536                 subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2537                 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
2538                 memcpy(chcr_req->key_ctx.key, aeadctx->key,
2539                        aeadctx->enckey_len);
2540         else
2541                 memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2542                        aeadctx->enckey_len);
2543
2544         memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2545                actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
2546         phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2547         ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2548         ulptx = (struct ulptx_sgl *)(ivptr + IV);
2549         if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2550             subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2551                 memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2552                 memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
2553                                 CTR_RFC3686_IV_SIZE);
2554                 *(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
2555                         CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2556         } else {
2557                 memcpy(ivptr, req->iv, IV);
2558         }
2559         chcr_add_aead_dst_ent(req, phys_cpl, qid);
2560         chcr_add_aead_src_ent(req, ulptx);
2561         atomic_inc(&adap->chcr_stats.cipher_rqst);
2562         temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2563                 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
2564         create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2565                    transhdr_len, temp, 0);
2566         reqctx->skb = skb;
2567
2568         return skb;
2569 err:
	chcr_aead_common_exit(req);
	return ERR_PTR(error);
2573 }
2574
2575 int chcr_aead_dma_map(struct device *dev,
2576                       struct aead_request *req,
2577                       unsigned short op_type)
2578 {
2579         int error;
2580         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2581         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2582         unsigned int authsize = crypto_aead_authsize(tfm);
2583         int src_len, dst_len;
2584
	/* Calculate and handle the src and dst sg lengths separately
	 * for in-place and out-of-place operations.
	 */
2588         if (req->src == req->dst) {
2589                 src_len = req->assoclen + req->cryptlen + (op_type ?
2590                                                         0 : authsize);
2591                 dst_len = src_len;
2592         } else {
2593                 src_len = req->assoclen + req->cryptlen;
2594                 dst_len = req->assoclen + req->cryptlen + (op_type ?
2595                                                         -authsize : authsize);
2596         }
2597
2598         if (!req->cryptlen || !src_len || !dst_len)
2599                 return 0;
2600         reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
2601                                         DMA_BIDIRECTIONAL);
2602         if (dma_mapping_error(dev, reqctx->iv_dma))
2603                 return -ENOMEM;
2604         if (reqctx->b0_len)
2605                 reqctx->b0_dma = reqctx->iv_dma + IV;
2606         else
2607                 reqctx->b0_dma = 0;
2608         if (req->src == req->dst) {
2609                 error = dma_map_sg(dev, req->src,
2610                                 sg_nents_for_len(req->src, src_len),
2611                                         DMA_BIDIRECTIONAL);
2612                 if (!error)
2613                         goto err;
2614         } else {
2615                 error = dma_map_sg(dev, req->src,
2616                                    sg_nents_for_len(req->src, src_len),
2617                                    DMA_TO_DEVICE);
2618                 if (!error)
2619                         goto err;
2620                 error = dma_map_sg(dev, req->dst,
2621                                    sg_nents_for_len(req->dst, dst_len),
2622                                    DMA_FROM_DEVICE);
2623                 if (!error) {
2624                         dma_unmap_sg(dev, req->src,
2625                                      sg_nents_for_len(req->src, src_len),
2626                                      DMA_TO_DEVICE);
2627                         goto err;
2628                 }
2629         }
2630
2631         return 0;
2632 err:
	dma_unmap_single(dev, reqctx->iv_dma, IV + reqctx->b0_len,
			 DMA_BIDIRECTIONAL);
2634         return -ENOMEM;
2635 }
2636
2637 void chcr_aead_dma_unmap(struct device *dev,
2638                          struct aead_request *req,
2639                          unsigned short op_type)
2640 {
2641         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2642         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2643         unsigned int authsize = crypto_aead_authsize(tfm);
2644         int src_len, dst_len;
2645
	/* Calculate and handle the src and dst sg lengths separately
	 * for in-place and out-of-place operations.
	 */
2649         if (req->src == req->dst) {
2650                 src_len = req->assoclen + req->cryptlen + (op_type ?
2651                                                         0 : authsize);
2652                 dst_len = src_len;
2653         } else {
2654                 src_len = req->assoclen + req->cryptlen;
2655                 dst_len = req->assoclen + req->cryptlen + (op_type ?
2656                                                 -authsize : authsize);
2657         }
2658
2659         if (!req->cryptlen || !src_len || !dst_len)
2660                 return;
2661
2662         dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
2663                                         DMA_BIDIRECTIONAL);
2664         if (req->src == req->dst) {
2665                 dma_unmap_sg(dev, req->src,
2666                              sg_nents_for_len(req->src, src_len),
2667                              DMA_BIDIRECTIONAL);
2668         } else {
2669                 dma_unmap_sg(dev, req->src,
2670                              sg_nents_for_len(req->src, src_len),
2671                              DMA_TO_DEVICE);
2672                 dma_unmap_sg(dev, req->dst,
2673                              sg_nents_for_len(req->dst, dst_len),
2674                              DMA_FROM_DEVICE);
2675         }
2676 }
2677
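/*
 *	chcr_add_aead_src_ent - Append the source data to the work
 *	request, either inline (immediate) or as a ULPTX SGL.
 */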
2678 void chcr_add_aead_src_ent(struct aead_request *req,
2679                            struct ulptx_sgl *ulptx)
2680 {
2681         struct ulptx_walk ulp_walk;
2682         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2683
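             /* For "immediate" requests the payload is copied inline into
              * the work request; otherwise a ULPTX SGL pointing at the
              * DMA-mapped scatterlist is built, with B0 (CCM only) first.
              */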
2684         if (reqctx->imm) {
2685                 u8 *buf = (u8 *)ulptx;
2686
2687                 if (reqctx->b0_len) {
2688                         memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2689                         buf += reqctx->b0_len;
2690                 }
2691                 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2692                                    buf, req->cryptlen + req->assoclen, 0);
2693         } else {
2694                 ulptx_walk_init(&ulp_walk, ulptx);
2695                 if (reqctx->b0_len)
2696                         ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2697                                             reqctx->b0_dma);
2698                 ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
2699                                   req->assoclen, 0);
2700                 ulptx_walk_end(&ulp_walk);
2701         }
2702 }
2703
2704 void chcr_add_aead_dst_ent(struct aead_request *req,
2705                            struct cpl_rx_phys_dsgl *phys_cpl,
2706                            unsigned short qid)
2707 {
2708         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2709         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2710         struct dsgl_walk dsgl_walk;
2711         unsigned int authsize = crypto_aead_authsize(tfm);
2712         struct chcr_context *ctx = a_ctx(tfm);
2713         u32 temp;
2714         unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2715
2716         dsgl_walk_init(&dsgl_walk, phys_cpl);
2717         dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
2718         temp = req->assoclen + req->cryptlen +
2719                 (reqctx->op ? -authsize : authsize);
2720         dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
2721         dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2722 }
2723
2724 void chcr_add_cipher_src_ent(struct skcipher_request *req,
2725                              void *ulptx,
2726                              struct  cipher_wr_param *wrparam)
2727 {
2728         struct ulptx_walk ulp_walk;
2729         struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2730         u8 *buf = ulptx;
2731
2732         memcpy(buf, reqctx->iv, IV);
2733         buf += IV;
2734         if (reqctx->imm) {
2735                 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2736                                    buf, wrparam->bytes, reqctx->processed);
2737         } else {
2738                 ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
2739                 ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2740                                   reqctx->src_ofst);
2741                 reqctx->srcsg = ulp_walk.last_sg;
2742                 reqctx->src_ofst = ulp_walk.last_sg_len;
2743                 ulptx_walk_end(&ulp_walk);
2744         }
2745 }
2746
2747 void chcr_add_cipher_dst_ent(struct skcipher_request *req,
2748                              struct cpl_rx_phys_dsgl *phys_cpl,
2749                              struct  cipher_wr_param *wrparam,
2750                              unsigned short qid)
2751 {
2752         struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2753         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
2754         struct chcr_context *ctx = c_ctx(tfm);
2755         struct dsgl_walk dsgl_walk;
2756         unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2757
2758         dsgl_walk_init(&dsgl_walk, phys_cpl);
2759         dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2760                          reqctx->dst_ofst);
2761         reqctx->dstsg = dsgl_walk.last_sg;
2762         reqctx->dst_ofst = dsgl_walk.last_sg_len;
2763         dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2764 }
2765
2766 void chcr_add_hash_src_ent(struct ahash_request *req,
2767                            struct ulptx_sgl *ulptx,
2768                            struct hash_wr_param *param)
2769 {
2770         struct ulptx_walk ulp_walk;
2771         struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2772
2773         if (reqctx->hctx_wr.imm) {
2774                 u8 *buf = (u8 *)ulptx;
2775
2776                 if (param->bfr_len) {
2777                         memcpy(buf, reqctx->reqbfr, param->bfr_len);
2778                         buf += param->bfr_len;
2779                 }
2780
2781                 sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
2782                                    sg_nents(reqctx->hctx_wr.srcsg), buf,
2783                                    param->sg_len, 0);
2784         } else {
2785                 ulptx_walk_init(&ulp_walk, ulptx);
2786                 if (param->bfr_len)
2787                         ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2788                                             reqctx->hctx_wr.dma_addr);
2789                 ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
2790                                   param->sg_len, reqctx->hctx_wr.src_ofst);
2791                 reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
2792                 reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
2793                 ulptx_walk_end(&ulp_walk);
2794         }
2795 }
2796
2797 int chcr_hash_dma_map(struct device *dev,
2798                       struct ahash_request *req)
2799 {
2800         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2801         int error = 0;
2802
2803         if (!req->nbytes)
2804                 return 0;
2805         error = dma_map_sg(dev, req->src, sg_nents(req->src),
2806                            DMA_TO_DEVICE);
2807         if (!error)
2808                 return -ENOMEM;
2809         req_ctx->hctx_wr.is_sg_map = 1;
2810         return 0;
2811 }
2812
2813 void chcr_hash_dma_unmap(struct device *dev,
2814                          struct ahash_request *req)
2815 {
2816         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2817
2818         if (!req->nbytes)
2819                 return;
2820
2821         dma_unmap_sg(dev, req->src, sg_nents(req->src),
2822                            DMA_TO_DEVICE);
2823         req_ctx->hctx_wr.is_sg_map = 0;
2825 }
2826
2827 int chcr_cipher_dma_map(struct device *dev,
2828                         struct skcipher_request *req)
2829 {
2830         int error;
2831
2832         if (req->src == req->dst) {
2833                 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2834                                    DMA_BIDIRECTIONAL);
2835                 if (!error)
2836                         goto err;
2837         } else {
2838                 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2839                                    DMA_TO_DEVICE);
2840                 if (!error)
2841                         goto err;
2842                 error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2843                                    DMA_FROM_DEVICE);
2844                 if (!error) {
2845                         dma_unmap_sg(dev, req->src, sg_nents(req->src),
2846                                    DMA_TO_DEVICE);
2847                         goto err;
2848                 }
2849         }
2850
2851         return 0;
2852 err:
2853         return -ENOMEM;
2854 }
2855
2856 void chcr_cipher_dma_unmap(struct device *dev,
2857                            struct skcipher_request *req)
2858 {
2859         if (req->src == req->dst) {
2860                 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2861                                    DMA_BIDIRECTIONAL);
2862         } else {
2863                 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2864                                    DMA_TO_DEVICE);
2865                 dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2866                                    DMA_FROM_DEVICE);
2867         }
2868 }
2869
2870 static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2871 {
2872         __be32 data;
2873
2874         memset(block, 0, csize);
2875         block += csize;
2876
2877         if (csize >= 4)
2878                 csize = 4;
2879         else if (msglen > (unsigned int)(1 << (8 * csize)))
2880                 return -EOVERFLOW;
2881
2882         data = cpu_to_be32(msglen);
2883         memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2884
2885         return 0;
2886 }
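
     /*
      * Illustrative sketch (not driver code): how set_msg_len() encodes the
      * CCM length field. For csize = 3 and msglen = 0x012345 the three bytes
      * written at the end of the block are:
      *
      *      block[0] = 0x01;   most significant byte
      *      block[1] = 0x23;
      *      block[2] = 0x45;   least significant byte
      *
      * For csize >= 4 only the low 32 bits of msglen are encoded and the
      * higher bytes remain zero, which suffices since cryptlen is a u32.
      */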
2887
2888 static int generate_b0(struct aead_request *req, u8 *ivptr,
2889                         unsigned short op_type)
2890 {
2891         unsigned int l, lp, m;
2892         int rc;
2893         struct crypto_aead *aead = crypto_aead_reqtfm(req);
2894         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2895         u8 *b0 = reqctx->scratch_pad;
2896
2897         m = crypto_aead_authsize(aead);
2898
2899         memcpy(b0, ivptr, 16);
2900
2901         lp = b0[0];
2902         l = lp + 1;
2903
2904         /* set m, bits 3-5 */
2905         *b0 |= (8 * ((m - 2) / 2));
2906
2907         /* set adata, bit 6, if associated data is used */
2908         if (req->assoclen)
2909                 *b0 |= 64;
2910         rc = set_msg_len(b0 + 16 - l,
2911                          (op_type == CHCR_DECRYPT_OP) ?
2912                          req->cryptlen - m : req->cryptlen, l);
2913
2914         return rc;
2915 }
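
     /*
      * Illustrative note: after generate_b0() the CCM B0 block is laid out as
      *
      *      B0[0]            flags: reserved(1) | adata(1) | M'(3) | L'(3)
      *      B0[1 .. 15-L']   nonce, copied from the IV
      *      B0[16-L' .. 15]  message length, big endian (set_msg_len())
      *
      * e.g. for authsize m = 16, assoclen != 0 and L' = 3 the flags byte is
      * 0x40 | (((16 - 2) / 2) << 3) | 3 = 0x7b.
      */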
2916
2917 static inline int crypto_ccm_check_iv(const u8 *iv)
2918 {
2919         /* 2 <= L <= 8, so 1 <= L' <= 7. */
2920         if (iv[0] < 1 || iv[0] > 7)
2921                 return -EINVAL;
2922
2923         return 0;
2924 }
2925
2926 static int ccm_format_packet(struct aead_request *req,
2927                              u8 *ivptr,
2928                              unsigned int sub_type,
2929                              unsigned short op_type,
2930                              unsigned int assoclen)
2931 {
2932         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2933         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2934         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2935         int rc = 0;
2936
2937         if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2938                 ivptr[0] = 3;
2939                 memcpy(ivptr + 1, &aeadctx->salt[0], 3);
2940                 memcpy(ivptr + 4, req->iv, 8);
2941                 memset(ivptr + 12, 0, 4);
2942         } else {
2943                 memcpy(ivptr, req->iv, 16);
2944         }
2945         if (assoclen)
2946                 put_unaligned_be16(assoclen, &reqctx->scratch_pad[16]);
2947
2948         rc = generate_b0(req, ivptr, op_type);
2949         /* zero the ctr value */
2950         memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
2951         return rc;
2952 }
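
     /*
      * Illustrative note: for rfc4309(ccm(aes)) the 16-byte IV assembled
      * above is
      *
      *      ivptr[0]       = 3        L' for the 11-byte nonce
      *      ivptr[1..3]    = salt     from the last 3 bytes of the key
      *      ivptr[4..11]   = req->iv  8-byte per-packet IV
      *      ivptr[12..15]  = 0        counter bytes, zeroed by the memset
      */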
2953
2954 static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2955                                   unsigned int dst_size,
2956                                   struct aead_request *req,
2957                                   unsigned short op_type)
2958 {
2959         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2960         struct chcr_context *ctx = a_ctx(tfm);
2961         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2962         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2963         unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2964         unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2965         unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2966         unsigned int ccm_xtra;
2967         unsigned int tag_offset = 0, auth_offset = 0;
2968         unsigned int assoclen;
2969
2970         if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2971                 assoclen = req->assoclen - 8;
2972         else
2973                 assoclen = req->assoclen;
2974         ccm_xtra = CCM_B0_SIZE +
2975                 ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2976
2977         auth_offset = req->cryptlen ?
2978                 (req->assoclen + IV + 1 + ccm_xtra) : 0;
2979         if (op_type == CHCR_DECRYPT_OP) {
2980                 if (crypto_aead_authsize(tfm) != req->cryptlen)
2981                         tag_offset = crypto_aead_authsize(tfm);
2982                 else
2983                         auth_offset = 0;
2984         }
2985
2986         sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
2987         sec_cpl->pldlen =
2988                 htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
2989         /* For CCM there will always be a B0 block, so AAD start is always 1 */
2990         sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2991                                 1 + IV, IV + assoclen + ccm_xtra,
2992                                 req->assoclen + IV + 1 + ccm_xtra, 0);
2993
2994         sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
2995                                         auth_offset, tag_offset,
2996                                         (op_type == CHCR_ENCRYPT_OP) ? 0 :
2997                                         crypto_aead_authsize(tfm));
2998         sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2999                                         (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
3000                                         cipher_mode, mac_mode,
3001                                         aeadctx->hmac_ctrl, IV >> 1);
3002
3003         sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
3004                                         0, dst_size);
3005 }
3006
3007 static int aead_ccm_validate_input(unsigned short op_type,
3008                                    struct aead_request *req,
3009                                    struct chcr_aead_ctx *aeadctx,
3010                                    unsigned int sub_type)
3011 {
3012         if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
3013                 if (crypto_ccm_check_iv(req->iv)) {
3014                         pr_err("CCM: IV check failed\n");
3015                         return -EINVAL;
3016                 }
3017         } else {
3018                 if (req->assoclen != 16 && req->assoclen != 20) {
3019                         pr_err("RFC4309: Invalid AAD length %u\n",
3020                                req->assoclen);
3021                         return -EINVAL;
3022                 }
3023         }
3024         return 0;
3025 }
3026
3027 static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
3028                                           unsigned short qid,
3029                                           int size)
3030 {
3031         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3032         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3033         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3034         struct sk_buff *skb = NULL;
3035         struct chcr_wr *chcr_req;
3036         struct cpl_rx_phys_dsgl *phys_cpl;
3037         struct ulptx_sgl *ulptx;
3038         unsigned int transhdr_len;
3039         unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
3040         unsigned int sub_type, assoclen = req->assoclen;
3041         unsigned int authsize = crypto_aead_authsize(tfm);
3042         int error = -EINVAL;
3043         u8 *ivptr;
3044         gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3045                 GFP_ATOMIC;
3046         struct adapter *adap = padap(a_ctx(tfm)->dev);
3047
3048         sub_type = get_aead_subtype(tfm);
3049         if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
3050                 assoclen -= 8;
3051         reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
3052         error = chcr_aead_common_init(req);
3053         if (error)
3054                 return ERR_PTR(error);
3055
3056         error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
3057         if (error)
3058                 goto err;
3059         dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
3060                         + (reqctx->op ? -authsize : authsize),
3061                         CHCR_DST_SG_SIZE, 0);
3062         dnents += MIN_CCM_SG; // For IV and B0
3063         dst_size = get_space_for_phys_dsgl(dnents);
3064         snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3065                                CHCR_SRC_SG_SIZE, 0);
3066         snents += MIN_CCM_SG; //For B0
3067         kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
3068         transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3069         reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
3070                        reqctx->b0_len) <= SGE_MAX_WR_LEN;
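             /* If the whole request fits in one work request
              * (SGE_MAX_WR_LEN) it is sent inline ("immediate"); otherwise
              * only a ULPTX SGL of snents entries goes into the WR.
              */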
3071         temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
3072                                      reqctx->b0_len, 16) :
3073                 (sgl_len(snents) *  8);
3074         transhdr_len += temp;
3075         transhdr_len = roundup(transhdr_len, 16);
3076
3077         if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
3078                                 reqctx->b0_len, transhdr_len, reqctx->op)) {
3079                 atomic_inc(&adap->chcr_stats.fallback);
3080                 chcr_aead_common_exit(req);
3081                 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3082         }
3083         skb = alloc_skb(transhdr_len, flags);
3085         if (!skb) {
3086                 error = -ENOMEM;
3087                 goto err;
3088         }
3089
3090         chcr_req = __skb_put_zero(skb, transhdr_len);
3091
3092         fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
3093
3094         chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3095         memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3096         memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3097                         aeadctx->key, aeadctx->enckey_len);
3098
3099         phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3100         ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3101         ulptx = (struct ulptx_sgl *)(ivptr + IV);
3102         error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
3103         if (error)
3104                 goto dstmap_fail;
3105         chcr_add_aead_dst_ent(req, phys_cpl, qid);
3106         chcr_add_aead_src_ent(req, ulptx);
3107
3108         atomic_inc(&adap->chcr_stats.aead_rqst);
3109         temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3110                 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
3111                 reqctx->b0_len) : 0);
3112         create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
3113                     transhdr_len, temp, 0);
3114         reqctx->skb = skb;
3115
3116         return skb;
3117 dstmap_fail:
3118         kfree_skb(skb);
3119 err:
3120         chcr_aead_common_exit(req);
3121         return ERR_PTR(error);
3122 }
3123
3124 static struct sk_buff *create_gcm_wr(struct aead_request *req,
3125                                      unsigned short qid,
3126                                      int size)
3127 {
3128         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3129         struct chcr_context *ctx = a_ctx(tfm);
3130         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3131         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
3132         struct sk_buff *skb = NULL;
3133         struct chcr_wr *chcr_req;
3134         struct cpl_rx_phys_dsgl *phys_cpl;
3135         struct ulptx_sgl *ulptx;
3136         unsigned int transhdr_len, dnents = 0, snents;
3137         unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
3138         unsigned int authsize = crypto_aead_authsize(tfm);
3139         int error = -EINVAL;
3140         u8 *ivptr;
3141         gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3142                 GFP_ATOMIC;
3143         struct adapter *adap = padap(ctx->dev);
3144         unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
3145
3146         if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
3147                 assoclen = req->assoclen - 8;
3148
3149         reqctx->b0_len = 0;
3150         error = chcr_aead_common_init(req);
3151         if (error)
3152                 return ERR_PTR(error);
3153         dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
3154                                 (reqctx->op ? -authsize : authsize),
3155                                 CHCR_DST_SG_SIZE, 0);
3156         snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3157                                CHCR_SRC_SG_SIZE, 0);
3158         dnents += MIN_GCM_SG; // For IV
3159         dst_size = get_space_for_phys_dsgl(dnents);
3160         kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
3161         transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3162         reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
3163                         SGE_MAX_WR_LEN;
3164         temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
3165                 (sgl_len(snents) * 8);
3166         transhdr_len += temp;
3167         transhdr_len = roundup(transhdr_len, 16);
3168         if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
3169                             transhdr_len, reqctx->op)) {
3171                 atomic_inc(&adap->chcr_stats.fallback);
3172                 chcr_aead_common_exit(req);
3173                 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3174         }
3175         skb = alloc_skb(transhdr_len, flags);
3176         if (!skb) {
3177                 error = -ENOMEM;
3178                 goto err;
3179         }
3180
3181         chcr_req = __skb_put_zero(skb, transhdr_len);
3182
3183         /* Offset of the tag from the end */
3184         temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
3185         chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
3186                                                 rx_channel_id, 2, 1);
3187         chcr_req->sec_cpl.pldlen =
3188                 htonl(req->assoclen + IV + req->cryptlen);
3189         chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
3190                                         assoclen ? 1 + IV : 0,
3191                                         assoclen ? IV + assoclen : 0,
3192                                         req->assoclen + IV + 1, 0);
3193         chcr_req->sec_cpl.cipherstop_lo_authinsert =
3194                         FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
3195                                                 temp, temp);
3196         chcr_req->sec_cpl.seqno_numivs =
3197                         FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
3198                                         CHCR_ENCRYPT_OP) ? 1 : 0,
3199                                         CHCR_SCMD_CIPHER_MODE_AES_GCM,
3200                                         CHCR_SCMD_AUTH_MODE_GHASH,
3201                                         aeadctx->hmac_ctrl, IV >> 1);
3202         chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
3203                                         0, 0, dst_size);
3204         chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3205         memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3206         memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3207                GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
3208
3209         phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3210         ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3211         /* Prepare a 16-byte IV: SALT | IV | 0x00000001 */
3213         if (get_aead_subtype(tfm) ==
3214             CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
3215                 memcpy(ivptr, aeadctx->salt, 4);
3216                 memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
3217         } else {
3218                 memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
3219         }
3220         put_unaligned_be32(0x01, &ivptr[12]);
3221         ulptx = (struct ulptx_sgl *)(ivptr + 16);
3222
3223         chcr_add_aead_dst_ent(req, phys_cpl, qid);
3224         chcr_add_aead_src_ent(req, ulptx);
3225         atomic_inc(&adap->chcr_stats.aead_rqst);
3226         temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3227                 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
3228         create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
3229                     transhdr_len, temp, reqctx->verify);
3230         reqctx->skb = skb;
3231         return skb;
3232
3233 err:
3234         chcr_aead_common_exit(req);
3235         return ERR_PTR(error);
3236 }
3237
3240 static int chcr_aead_cra_init(struct crypto_aead *tfm)
3241 {
3242         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3243         struct aead_alg *alg = crypto_aead_alg(tfm);
3244
3245         aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
3246                                                CRYPTO_ALG_NEED_FALLBACK |
3247                                                CRYPTO_ALG_ASYNC);
3248         if (IS_ERR(aeadctx->sw_cipher))
3249                 return PTR_ERR(aeadctx->sw_cipher);
3250         crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
3251                                  sizeof(struct aead_request) +
3252                                  crypto_aead_reqsize(aeadctx->sw_cipher)));
3253         return chcr_device_init(a_ctx(tfm));
3254 }
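
     /*
      * Note: the request size above is the larger of the driver's own
      * context and a nested aead_request for the software fallback, so
      * the fallback path can build its sub-request in place inside the
      * same request context.
      */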
3255
3256 static void chcr_aead_cra_exit(struct crypto_aead *tfm)
3257 {
3258         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3259
3260         crypto_free_aead(aeadctx->sw_cipher);
3261 }
3262
3263 static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
3264                                         unsigned int authsize)
3265 {
3266         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3267
3268         aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
3269         aeadctx->mayverify = VERIFY_HW;
3270         return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3271 }

3272 static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
3273                                     unsigned int authsize)
3274 {
3275         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3276         u32 maxauth = crypto_aead_maxauthsize(tfm);
3277
3278         /* For SHA1 the IPsec authsize is 12 rather than 10, i.e.
3279          * authsize == maxauthsize / 2 does not hold, so the authsize == 12
3280          * check must come before the authsize == (maxauth >> 1) check.
3281          */
3282         if (authsize == ICV_4) {
3283                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3284                 aeadctx->mayverify = VERIFY_HW;
3285         } else if (authsize == ICV_6) {
3286                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3287                 aeadctx->mayverify = VERIFY_HW;
3288         } else if (authsize == ICV_10) {
3289                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3290                 aeadctx->mayverify = VERIFY_HW;
3291         } else if (authsize == ICV_12) {
3292                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3293                 aeadctx->mayverify = VERIFY_HW;
3294         } else if (authsize == ICV_14) {
3295                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3296                 aeadctx->mayverify = VERIFY_HW;
3297         } else if (authsize == (maxauth >> 1)) {
3298                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3299                 aeadctx->mayverify = VERIFY_HW;
3300         } else if (authsize == maxauth) {
3301                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3302                 aeadctx->mayverify = VERIFY_HW;
3303         } else {
3304                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3305                 aeadctx->mayverify = VERIFY_SW;
3306         }
3307         return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3308 }
3309
3311 static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
3312 {
3313         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3314
3315         switch (authsize) {
3316         case ICV_4:
3317                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3318                 aeadctx->mayverify = VERIFY_HW;
3319                 break;
3320         case ICV_8:
3321                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3322                 aeadctx->mayverify = VERIFY_HW;
3323                 break;
3324         case ICV_12:
3325                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3326                 aeadctx->mayverify = VERIFY_HW;
3327                 break;
3328         case ICV_14:
3329                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3330                 aeadctx->mayverify = VERIFY_HW;
3331                 break;
3332         case ICV_16:
3333                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3334                 aeadctx->mayverify = VERIFY_HW;
3335                 break;
3336         case ICV_13:
3337         case ICV_15:
3338                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3339                 aeadctx->mayverify = VERIFY_SW;
3340                 break;
3341         default:
3342                 return -EINVAL;
3343         }
3344         return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3345 }
3346
3347 static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
3348                                           unsigned int authsize)
3349 {
3350         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3351
3352         switch (authsize) {
3353         case ICV_8:
3354                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3355                 aeadctx->mayverify = VERIFY_HW;
3356                 break;
3357         case ICV_12:
3358                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3359                 aeadctx->mayverify = VERIFY_HW;
3360                 break;
3361         case ICV_16:
3362                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3363                 aeadctx->mayverify = VERIFY_HW;
3364                 break;
3365         default:
3366                 return -EINVAL;
3367         }
3368         return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3369 }
3370
3371 static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
3372                                 unsigned int authsize)
3373 {
3374         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3375
3376         switch (authsize) {
3377         case ICV_4:
3378                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3379                 aeadctx->mayverify = VERIFY_HW;
3380                 break;
3381         case ICV_6:
3382                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3383                 aeadctx->mayverify = VERIFY_HW;
3384                 break;
3385         case ICV_8:
3386                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3387                 aeadctx->mayverify = VERIFY_HW;
3388                 break;
3389         case ICV_10:
3390                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3391                 aeadctx->mayverify = VERIFY_HW;
3392                 break;
3393         case ICV_12:
3394                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3395                 aeadctx->mayverify = VERIFY_HW;
3396                 break;
3397         case ICV_14:
3398                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3399                 aeadctx->mayverify = VERIFY_HW;
3400                 break;
3401         case ICV_16:
3402                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3403                 aeadctx->mayverify = VERIFY_HW;
3404                 break;
3405         default:
3406                 return -EINVAL;
3407         }
3408         return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3409 }
3410
3411 static int chcr_ccm_common_setkey(struct crypto_aead *aead,
3412                                 const u8 *key,
3413                                 unsigned int keylen)
3414 {
3415         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3416         unsigned char ck_size, mk_size;
3417         int key_ctx_size = 0;
3418
3419         key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
3420         if (keylen == AES_KEYSIZE_128) {
3421                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3422                 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
3423         } else if (keylen == AES_KEYSIZE_192) {
3424                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3425                 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
3426         } else if (keylen == AES_KEYSIZE_256) {
3427                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3428                 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
3429         } else {
3430                 aeadctx->enckey_len = 0;
3431                 return  -EINVAL;
3432         }
3433         aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
3434                                                 key_ctx_size >> 4);
3435         memcpy(aeadctx->key, key, keylen);
3436         aeadctx->enckey_len = keylen;
3437
3438         return 0;
3439 }
3440
3441 static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
3442                                 const u8 *key,
3443                                 unsigned int keylen)
3444 {
3445         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3446         int error;
3447
3448         crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3449         crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3450                               CRYPTO_TFM_REQ_MASK);
3451         error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3452         if (error)
3453                 return error;
3454         return chcr_ccm_common_setkey(aead, key, keylen);
3455 }
3456
3457 static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
3458                                     unsigned int keylen)
3459 {
3460         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3461         int error;
3462
3463         if (keylen < 3) {
3464                 aeadctx->enckey_len = 0;
3465                 return  -EINVAL;
3466         }
3467         crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3468         crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3469                               CRYPTO_TFM_REQ_MASK);
3470         error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3471         if (error)
3472                 return error;
3473         keylen -= 3;
3474         memcpy(aeadctx->salt, key + keylen, 3);
3475         return chcr_ccm_common_setkey(aead, key, keylen);
3476 }
3477
3478 static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3479                            unsigned int keylen)
3480 {
3481         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3482         struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
3483         unsigned int ck_size;
3484         int ret = 0, key_ctx_size = 0;
3485         struct crypto_aes_ctx aes;
3486
3487         aeadctx->enckey_len = 0;
3488         crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3489         crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
3490                               & CRYPTO_TFM_REQ_MASK);
3491         ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3492         if (ret)
3493                 goto out;
3494
3495         if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3496             keylen > 3) {
3497                 keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
3498                 memcpy(aeadctx->salt, key + keylen, 4);
3499         }
3500         if (keylen == AES_KEYSIZE_128) {
3501                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3502         } else if (keylen == AES_KEYSIZE_192) {
3503                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3504         } else if (keylen == AES_KEYSIZE_256) {
3505                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3506         } else {
3507                 pr_err("GCM: Invalid key length %u\n", keylen);
3508                 ret = -EINVAL;
3509                 goto out;
3510         }
3511
3512         memcpy(aeadctx->key, key, keylen);
3513         aeadctx->enckey_len = keylen;
3514         key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
3515                 AEAD_H_SIZE;
3516         aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
3517                                                 CHCR_KEYCTX_MAC_KEY_SIZE_128,
3518                                                 0, 0,
3519                                                 key_ctx_size >> 4);
3520         /* Calculate the hash subkey H = CIPH_K(0^128), i.e. AES of a
3521          * zero block under the cipher key; it goes into the key context.
3522          */
3523         ret = aes_expandkey(&aes, key, keylen);
3524         if (ret) {
3525                 aeadctx->enckey_len = 0;
3526                 goto out;
3527         }
3528         memset(gctx->ghash_h, 0, AEAD_H_SIZE);
3529         aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
3530         memzero_explicit(&aes, sizeof(aes));
3531
3532 out:
3533         return ret;
3534 }
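
     /*
      * Illustrative sketch (assumed standalone context, not driver code):
      * deriving the GHASH subkey H the same way chcr_gcm_setkey() does,
      * using the kernel AES library:
      *
      *      struct crypto_aes_ctx aes;
      *      u8 h[AES_BLOCK_SIZE] = { 0 };         // 0 repeated 16 times
      *
      *      if (aes_expandkey(&aes, key, keylen))
      *              return -EINVAL;
      *      aes_encrypt(&aes, h, h);              // H = CIPH_K(0^128)
      *      memzero_explicit(&aes, sizeof(aes));  // scrub the round keys
      */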
3535
3536 static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3537                                    unsigned int keylen)
3538 {
3539         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3540         struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3541         /* 'keys' will hold both the authentication and the cipher key */
3542         struct crypto_authenc_keys keys;
3543         unsigned int bs, subtype;
3544         unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
3545         int err = 0, i, key_ctx_len = 0;
3546         unsigned char ck_size = 0;
3547         unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
3548         struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
3549         struct algo_param param;
3550         int align;
3551         u8 *o_ptr = NULL;
3552
3553         crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3554         crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3555                               & CRYPTO_TFM_REQ_MASK);
3556         err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3557         if (err)
3558                 goto out;
3559
3560         if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3561                 goto out;
3562
3563         if (get_alg_config(&param, max_authsize)) {
3564                 pr_err("Unsupported digest size\n");
3565                 goto out;
3566         }
3567         subtype = get_aead_subtype(authenc);
3568         if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3569                 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3570                 if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3571                         goto out;
3572                 memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3573                 - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3574                 keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3575         }
3576         if (keys.enckeylen == AES_KEYSIZE_128) {
3577                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3578         } else if (keys.enckeylen == AES_KEYSIZE_192) {
3579                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3580         } else if (keys.enckeylen == AES_KEYSIZE_256) {
3581                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3582         } else {
3583                 pr_err("Unsupported cipher key\n");
3584                 goto out;
3585         }
3586
3587         /* Copy only the encryption key. The authkey is used here to
3588          * generate h(ipad) and h(opad) and is not needed afterwards;
3589          * authkeylen is the size of the hash digest.
3590          */
3591         memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3592         aeadctx->enckey_len = keys.enckeylen;
3593         if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3594                 subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3595
3596                 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3597                             aeadctx->enckey_len << 3);
3598         }
3599         base_hash = chcr_alloc_shash(max_authsize);
3600         if (IS_ERR(base_hash)) {
3601                 pr_err("Base driver cannot be loaded\n");
3602                 goto out;
3603         }
3604         {
3605                 SHASH_DESC_ON_STACK(shash, base_hash);
3606
3607                 shash->tfm = base_hash;
3608                 bs = crypto_shash_blocksize(base_hash);
3609                 align = KEYCTX_ALIGN_PAD(max_authsize);
3610                 o_ptr = actx->h_iopad + param.result_size + align;
3611
3612                 if (keys.authkeylen > bs) {
3613                         err = crypto_shash_digest(shash, keys.authkey,
3614                                                   keys.authkeylen,
3615                                                   o_ptr);
3616                         if (err) {
3617                                 pr_err("Hashing of the auth key failed\n");
3618                                 goto out;
3619                         }
3620                         keys.authkeylen = max_authsize;
3621                 } else {
3622                         memcpy(o_ptr, keys.authkey, keys.authkeylen);
                     }
3623
3624                 /* Compute the ipad-digest*/
3625                 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3626                 memcpy(pad, o_ptr, keys.authkeylen);
3627                 for (i = 0; i < bs >> 2; i++)
3628                         *((unsigned int *)pad + i) ^= IPAD_DATA;
3629
3630                 if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
3631                                               max_authsize))
3632                         goto out;
3633                 /* Compute the opad-digest */
3634                 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3635                 memcpy(pad, o_ptr, keys.authkeylen);
3636                 for (i = 0; i < bs >> 2; i++)
3637                         *((unsigned int *)pad + i) ^= OPAD_DATA;
3638
3639                 if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3640                         goto out;
3641
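                     /* Background: the two loops above implement the standard
                      * HMAC key preprocessing, h_ipad = H(K' ^ ipad) and
                      * h_opad = H(K' ^ opad), with IPAD_DATA/OPAD_DATA being
                      * the 0x36/0x5c pad bytes replicated to 32-bit width.
                      * Only these partial hashes are loaded into the key
                      * context; the hardware resumes the hashes from them.
                      */
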
3642                 /* convert the ipad and opad digest to network order */
3643                 chcr_change_order(actx->h_iopad, param.result_size);
3644                 chcr_change_order(o_ptr, param.result_size);
3645                 key_ctx_len = sizeof(struct _key_ctx) +
3646                         roundup(keys.enckeylen, 16) +
3647                         (param.result_size + align) * 2;
3648                 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3649                                                 0, 1, key_ctx_len >> 4);
3650                 actx->auth_mode = param.auth_mode;
3651                 chcr_free_shash(base_hash);
3652
3653                 memzero_explicit(&keys, sizeof(keys));
3654                 return 0;
3655         }
3656 out:
3657         aeadctx->enckey_len = 0;
3658         memzero_explicit(&keys, sizeof(keys));
3659         if (!IS_ERR(base_hash))
3660                 chcr_free_shash(base_hash);
3661         return -EINVAL;
3662 }
3663
3664 static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3665                                         const u8 *key, unsigned int keylen)
3666 {
3667         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3668         struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3669         struct crypto_authenc_keys keys;
3670         int err;
3671         /* 'keys' holds both the authentication and the cipher key */
3672         unsigned int subtype;
3673         int key_ctx_len = 0;
3674         unsigned char ck_size = 0;
3675
3676         crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3677         crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3678                               & CRYPTO_TFM_REQ_MASK);
3679         err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3680         if (err)
3681                 goto out;
3682
3683         if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3684                 goto out;
3685
3686         subtype = get_aead_subtype(authenc);
3687         if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3688             subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3689                 if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3690                         goto out;
3691                 memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3692                         - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3693                 keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3694         }
3695         if (keys.enckeylen == AES_KEYSIZE_128) {
3696                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3697         } else if (keys.enckeylen == AES_KEYSIZE_192) {
3698                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3699         } else if (keys.enckeylen == AES_KEYSIZE_256) {
3700                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3701         } else {
3702                 pr_err("Unsupported cipher key length %u\n", keys.enckeylen);
3703                 goto out;
3704         }
3705         memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3706         aeadctx->enckey_len = keys.enckeylen;
3707         if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3708             subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3709                 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3710                                 aeadctx->enckey_len << 3);
3711         }
3712         key_ctx_len =  sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
3713
3714         aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3715                                                 0, key_ctx_len >> 4);
3716         actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3717         memzero_explicit(&keys, sizeof(keys));
3718         return 0;
3719 out:
3720         aeadctx->enckey_len = 0;
3721         memzero_explicit(&keys, sizeof(keys));
3722         return -EINVAL;
3723 }
3724
3725 static int chcr_aead_op(struct aead_request *req,
3726                         int size,
3727                         create_wr_t create_wr_fn)
3728 {
3729         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3730         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
3731         struct chcr_context *ctx = a_ctx(tfm);
3732         struct uld_ctx *u_ctx = ULD_CTX(ctx);
3733         struct sk_buff *skb;
3734         struct chcr_dev *cdev;
3735
3736         cdev = a_ctx(tfm)->dev;
3737         if (!cdev) {
3738                 pr_err("%s: No crypto device.\n", __func__);
3739                 return -ENXIO;
3740         }
3741
3742         if (chcr_inc_wrcount(cdev)) {
3743                 /* Detached state means the lldi or padap has been freed;
3744                  * the fallback counter cannot be incremented here.
3745                  */
3746                 return chcr_aead_fallback(req, reqctx->op);
3747         }
3748
3749         if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
3750                                    reqctx->txqidx) &&
3751             !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
3752                 chcr_dec_wrcount(cdev);
3753                 return -ENOSPC;
3754         }
3755
3756         if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3757             crypto_ipsec_check_assoclen(req->assoclen) != 0) {
3758                 pr_err("RFC4106: Invalid value of assoclen %u\n",
3759                        req->assoclen);
                     /* Balance the chcr_inc_wrcount() above on this error path */
                     chcr_dec_wrcount(cdev);
3760                 return -EINVAL;
3761         }
3762
3763         /* Form a WR from req */
3764         skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);
3765
3766         if (IS_ERR_OR_NULL(skb)) {
3767                 chcr_dec_wrcount(cdev);
3768                 return PTR_ERR_OR_ZERO(skb);
3769         }
3770
3771         skb->dev = u_ctx->lldi.ports[0];
3772         set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
3773         chcr_send_wr(skb);
3774         return -EINPROGRESS;
3775 }
3776
3777 static int chcr_aead_encrypt(struct aead_request *req)
3778 {
3779         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3780         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3781         struct chcr_context *ctx = a_ctx(tfm);
3782         unsigned int cpu;
3783
3784         cpu = get_cpu();
3785         reqctx->txqidx = cpu % ctx->ntxq;
3786         reqctx->rxqidx = cpu % ctx->nrxq;
3787         put_cpu();
3788
3789         reqctx->verify = VERIFY_HW;
3790         reqctx->op = CHCR_ENCRYPT_OP;
3791
3792         switch (get_aead_subtype(tfm)) {
3793         case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3794         case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3795         case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3796         case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3797                 return chcr_aead_op(req, 0, create_authenc_wr);
3798         case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3799         case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3800                 return chcr_aead_op(req, 0, create_aead_ccm_wr);
3801         default:
3802                 return chcr_aead_op(req, 0, create_gcm_wr);
3803         }
3804 }
3805
3806 static int chcr_aead_decrypt(struct aead_request *req)
3807 {
3808         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3809         struct chcr_context *ctx = a_ctx(tfm);
3810         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3811         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3812         int size;
3813         unsigned int cpu;
3814
3815         cpu = get_cpu();
3816         reqctx->txqidx = cpu % ctx->ntxq;
3817         reqctx->rxqidx = cpu % ctx->nrxq;
3818         put_cpu();
3819
3820         if (aeadctx->mayverify == VERIFY_SW) {
3821                 size = crypto_aead_maxauthsize(tfm);
3822                 reqctx->verify = VERIFY_SW;
3823         } else {
3824                 size = 0;
3825                 reqctx->verify = VERIFY_HW;
3826         }
3827         reqctx->op = CHCR_DECRYPT_OP;
3828         switch (get_aead_subtype(tfm)) {
3829         case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3830         case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3831         case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3832         case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3833                 return chcr_aead_op(req, size, create_authenc_wr);
3834         case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3835         case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3836                 return chcr_aead_op(req, size, create_aead_ccm_wr);
3837         default:
3838                 return chcr_aead_op(req, size, create_gcm_wr);
3839         }
3840 }
3841
3842 static struct chcr_alg_template driver_algs[] = {
3843         /* AES-CBC */
3844         {
3845                 .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
3846                 .is_registered = 0,
3847                 .alg.skcipher = {
3848                         .base.cra_name          = "cbc(aes)",
3849                         .base.cra_driver_name   = "cbc-aes-chcr",
3850                         .base.cra_blocksize     = AES_BLOCK_SIZE,
3851
3852                         .init                   = chcr_init_tfm,
3853                         .exit                   = chcr_exit_tfm,
3854                         .min_keysize            = AES_MIN_KEY_SIZE,
3855                         .max_keysize            = AES_MAX_KEY_SIZE,
3856                         .ivsize                 = AES_BLOCK_SIZE,
3857                         .setkey                 = chcr_aes_cbc_setkey,
3858                         .encrypt                = chcr_aes_encrypt,
3859                         .decrypt                = chcr_aes_decrypt,
3860                         }
3861         },
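             /*
              * Usage note (illustrative): a caller allocating, e.g.,
              * crypto_alloc_skcipher("cbc(aes)", 0, 0) may be served by
              * "cbc-aes-chcr" when this template is registered with a
              * higher priority than the generic software implementation.
              */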
3862         {
3863                 .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3864                 .is_registered = 0,
3865                 .alg.skcipher = {
3866                         .base.cra_name          = "xts(aes)",
3867                         .base.cra_driver_name   = "xts-aes-chcr",
3868                         .base.cra_blocksize     = AES_BLOCK_SIZE,
3869
3870                         .init                   = chcr_init_tfm,
3871                         .exit                   = chcr_exit_tfm,
3872                         .min_keysize            = 2 * AES_MIN_KEY_SIZE,
3873                         .max_keysize            = 2 * AES_MAX_KEY_SIZE,
3874                         .ivsize                 = AES_BLOCK_SIZE,
3875                         .setkey                 = chcr_aes_xts_setkey,
3876                         .encrypt                = chcr_aes_encrypt,
			.decrypt		= chcr_aes_decrypt,
			}
	},
	{
		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
		.is_registered = 0,
		.alg.skcipher = {
			.base.cra_name		= "ctr(aes)",
			.base.cra_driver_name	= "ctr-aes-chcr",
			.base.cra_blocksize	= 1,

			.init			= chcr_init_tfm,
			.exit			= chcr_exit_tfm,
			.min_keysize		= AES_MIN_KEY_SIZE,
			.max_keysize		= AES_MAX_KEY_SIZE,
			.ivsize			= AES_BLOCK_SIZE,
			.setkey			= chcr_aes_ctr_setkey,
			.encrypt		= chcr_aes_encrypt,
			.decrypt		= chcr_aes_decrypt,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_SKCIPHER |
			CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
		.is_registered = 0,
		.alg.skcipher = {
			.base.cra_name		= "rfc3686(ctr(aes))",
			.base.cra_driver_name	= "rfc3686-ctr-aes-chcr",
			.base.cra_blocksize	= 1,

			.init			= chcr_rfc3686_init,
			.exit			= chcr_exit_tfm,
			.min_keysize		= AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
			.max_keysize		= AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
			.ivsize			= CTR_RFC3686_IV_SIZE,
			.setkey			= chcr_aes_rfc3686_setkey,
			.encrypt		= chcr_aes_encrypt,
			.decrypt		= chcr_aes_decrypt,
		}
	},
	/* SHA */
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-chcr",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-chcr",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-chcr",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-chcr",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-chcr",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
	/* HMAC */
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-chcr",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-chcr",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-chcr",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-chcr",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-chcr",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
	/* Add AEAD Algorithms */
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-chcr",
				.cra_blocksize	= 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_gcm_ctx),
			},
			.ivsize = GCM_AES_IV_SIZE,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_gcm_setkey,
			.setauthsize = chcr_gcm_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-chcr",
				.cra_blocksize	 = 1,
				.cra_priority = CHCR_AEAD_PRIORITY + 1,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_gcm_ctx),

			},
			.ivsize = GCM_RFC4106_IV_SIZE,
			.maxauthsize	= GHASH_DIGEST_SIZE,
			.setkey = chcr_gcm_setkey,
			.setauthsize	= chcr_4106_4309_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "ccm(aes)",
				.cra_driver_name = "ccm-aes-chcr",
				.cra_blocksize	 = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx),

			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize	= GHASH_DIGEST_SIZE,
			.setkey = chcr_aead_ccm_setkey,
			.setauthsize	= chcr_ccm_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "rfc4309(ccm(aes))",
				.cra_driver_name = "rfc4309-ccm-aes-chcr",
				.cra_blocksize	 = 1,
				.cra_priority = CHCR_AEAD_PRIORITY + 1,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx),

			},
			.ivsize = 8,
			.maxauthsize	= GHASH_DIGEST_SIZE,
			.setkey = chcr_aead_rfc4309_setkey,
			.setauthsize = chcr_4106_4309_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha1-cbc-aes-chcr",
				.cra_blocksize	 = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),

			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {

				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha256-cbc-aes-chcr",
				.cra_blocksize	 = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),

			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize	= SHA256_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha224-cbc-aes-chcr",
				.cra_blocksize	 = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha384-cbc-aes-chcr",
				.cra_blocksize	 = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),

			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha512-cbc-aes-chcr",
				.cra_blocksize	 = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),

			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(digest_null,cbc(aes))",
				.cra_driver_name =
					"authenc-digest_null-cbc-aes-chcr",
				.cra_blocksize	 = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),

			},
			.ivsize  = AES_BLOCK_SIZE,
			.maxauthsize = 0,
			.setkey  = chcr_aead_digest_null_setkey,
			.setauthsize = chcr_authenc_null_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
				.cra_driver_name =
				"authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
				.cra_blocksize	 = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),

			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {

				.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
				.cra_driver_name =
				"authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
				.cra_blocksize	 = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),

			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize	= SHA256_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
				.cra_driver_name =
				"authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
				.cra_blocksize	 = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
				.cra_driver_name =
				"authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
				.cra_blocksize	 = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),

			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
				.cra_driver_name =
				"authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
				.cra_blocksize	 = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),

			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
				.cra_driver_name =
				"authenc-digest_null-rfc3686-ctr-aes-chcr",
				.cra_blocksize	 = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),

			},
			.ivsize  = CTR_RFC3686_IV_SIZE,
			.maxauthsize = 0,
			.setkey  = chcr_aead_digest_null_setkey,
			.setauthsize = chcr_authenc_null_setauthsize,
		}
	},
};

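/*
 * Example (illustrative only, not part of the driver): each entry above
 * is published under its generic cra_name, so kernel consumers reach the
 * hardware through the normal crypto API and the core picks whichever
 * registered implementation has the highest cra_priority. A minimal
 * sketch, assuming a caller in process context with key/keylen in hand:
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_aead_setkey(tfm, key, keylen);
 *	crypto_aead_setauthsize(tfm, 16);
 *	... issue requests via aead_request_alloc()/crypto_aead_encrypt() ...
 *	crypto_free_aead(tfm);
 */
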
/*
 *	chcr_unregister_alg - Deregister crypto algorithms with the
 *	kernel crypto framework.
 */
static int chcr_unregister_alg(void)
{
	int i;

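	/*
	 * Only drop an algorithm when nothing else holds a reference to
	 * it: the crypto core takes one reference at registration time,
	 * so a cra_refcnt of 1 means no live transforms remain.
	 */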
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			if (driver_algs[i].is_registered && refcount_read(
			    &driver_algs[i].alg.skcipher.base.cra_refcnt)
			    == 1) {
				crypto_unregister_skcipher(
						&driver_algs[i].alg.skcipher);
				driver_algs[i].is_registered = 0;
			}
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			if (driver_algs[i].is_registered && refcount_read(
			    &driver_algs[i].alg.aead.base.cra_refcnt) == 1) {
				crypto_unregister_aead(
						&driver_algs[i].alg.aead);
				driver_algs[i].is_registered = 0;
			}
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			if (driver_algs[i].is_registered && refcount_read(
			    &driver_algs[i].alg.hash.halg.base.cra_refcnt)
			    == 1) {
				crypto_unregister_ahash(
						&driver_algs[i].alg.hash);
				driver_algs[i].is_registered = 0;
			}
			break;
		}
	}
	return 0;
}

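/*
 * Transform context sizes handed to the crypto core below. Plain SHA
 * transforms need only the common chcr context; HMAC transforms also
 * carry a struct hmac_ctx, which holds the software shash used for key
 * preprocessing and the precomputed ipad/opad state (see chcr_crypto.h).
 */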
#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)

/*
 *	chcr_register_alg - Register crypto algorithms with the kernel
 *	crypto framework.
 */
static int chcr_register_alg(void)
{
	struct crypto_alg ai;
	struct ahash_alg *a_hash;
	int err = 0, i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (driver_algs[i].is_registered)
			continue;
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			driver_algs[i].alg.skcipher.base.cra_priority =
				CHCR_CRA_PRIORITY;
			driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE;
			driver_algs[i].alg.skcipher.base.cra_flags =
				CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_ALLOCATES_MEMORY |
				CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.skcipher.base.cra_ctxsize =
				sizeof(struct chcr_context) +
				sizeof(struct ablk_ctx);
			driver_algs[i].alg.skcipher.base.cra_alignmask = 0;

			err = crypto_register_skcipher(&driver_algs[i].alg.skcipher);
			name = driver_algs[i].alg.skcipher.base.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			driver_algs[i].alg.aead.base.cra_flags =
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
				CRYPTO_ALG_ALLOCATES_MEMORY;
			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
			err = crypto_register_aead(&driver_algs[i].alg.aead);
			name = driver_algs[i].alg.aead.base.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			a_hash = &driver_algs[i].alg.hash;
			a_hash->update = chcr_ahash_update;
			a_hash->final = chcr_ahash_final;
			a_hash->finup = chcr_ahash_finup;
			a_hash->digest = chcr_ahash_digest;
			a_hash->export = chcr_ahash_export;
			a_hash->import = chcr_ahash_import;
			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
			a_hash->halg.base.cra_module = THIS_MODULE;
			a_hash->halg.base.cra_flags =
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
			a_hash->halg.base.cra_alignmask = 0;
			a_hash->halg.base.cra_exit = NULL;

			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
				a_hash->init = chcr_hmac_init;
				a_hash->setkey = chcr_ahash_setkey;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
			} else {
				a_hash->init = chcr_sha_init;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
				a_hash->halg.base.cra_init = chcr_sha_cra_init;
			}
			err = crypto_register_ahash(&driver_algs[i].alg.hash);
			ai = driver_algs[i].alg.hash.halg.base;
			name = ai.cra_driver_name;
			break;
		}
		if (err) {
			pr_err("%s : Algorithm registration failed\n", name);
			goto register_err;
		} else {
			driver_algs[i].is_registered = 1;
		}
	}
	return 0;

register_err:
	chcr_unregister_alg();
	return err;
}

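/*
 * Example (illustrative only, not part of the driver): after a
 * successful chcr_register_alg(), a caller can bind to this hardware
 * implementation explicitly through its cra_driver_name rather than
 * letting priority-based selection decide. A minimal sketch, assuming
 * process context:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256-chcr", 0, 0);
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	... drive it via ahash_request_alloc()/crypto_ahash_digest() ...
 *	crypto_free_ahash(tfm);
 */
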
/*
 *	start_crypto - Register the crypto algorithms.
 *	This should be called once when the first device comes up. After this
 *	the kernel will start calling the driver APIs for crypto operations.
 */
int start_crypto(void)
{
	return chcr_register_alg();
}

/*
 *	stop_crypto - Deregister all the crypto algorithms with the kernel.
 *	This should be called once when the last device goes down. After this
 *	the kernel will not call the driver APIs for crypto operations.
 */
int stop_crypto(void)
{
	chcr_unregister_alg();
	return 0;
}
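
/*
 * Hypothetical caller sketch (illustrative only; the real call sites
 * live in chcr_core.c): start_crypto()/stop_crypto() are expected to
 * bracket the lifetimes of the first and last device, e.g.:
 *
 *	if (atomic_inc_return(&dev_count) == 1)
 *		start_crypto();
 *	...
 *	if (atomic_dec_and_test(&dev_count))
 *		stop_crypto();
 *
 * where dev_count stands in for a per-driver device counter.
 */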