/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *      Manoj Malviya (manojmalviya@chelsio.com)
 *      Atul Gupta (atul.gupta@chelsio.com)
 *      Jitendra Lulla (jlulla@chelsio.com)
 *      Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *      Harsh Jain (harsh@chelsio.com)
 */

#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/gcm.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "t4fw_api.h"
#include "t4_msg.h"
#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

#define IV AES_BLOCK_SIZE

static unsigned int sgl_ent_len[] = {
        0, 0, 16, 24, 40, 48, 64, 72, 88,
        96, 112, 120, 136, 144, 160, 168, 184,
        192, 208, 216, 232, 240, 256, 264, 280,
        288, 304, 312, 328, 336, 352, 360, 376
};

static unsigned int dsgl_ent_len[] = {
        0, 32, 32, 48, 48, 64, 64, 80, 80,
        112, 112, 128, 128, 144, 144, 160, 160,
        192, 192, 208, 208, 224, 224, 240, 240,
        272, 272, 288, 288, 304, 304, 320, 320
};

static u32 round_constant[11] = {
        0x01000000, 0x02000000, 0x04000000, 0x08000000,
        0x10000000, 0x20000000, 0x40000000, 0x80000000,
        0x1B000000, 0x36000000, 0x6C000000
};

static int chcr_handle_cipher_resp(struct skcipher_request *req,
                                   unsigned char *input, int err);

static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
        return ctx->crypto_ctx->aeadctx;
}

static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
        return ctx->crypto_ctx->ablkctx;
}

static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
        return ctx->crypto_ctx->hmacctx;
}

static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
{
        return gctx->ctx->gcm;
}

static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
{
        return gctx->ctx->authenc;
}

static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
        return container_of(ctx->dev, struct uld_ctx, dev);
}

static inline int is_ofld_imm(const struct sk_buff *skb)
{
        return (skb->len <= SGE_MAX_WR_LEN);
}

static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
{
        memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
}

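/* Count the number of gather-list entries needed to cover @reqlen bytes of
 * the DMA-mapped scatterlist @sg, starting @skip bytes in, when each
 * hardware entry can describe at most @entlen bytes.
 */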
static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
                         unsigned int entlen,
                         unsigned int skip)
{
        int nents = 0;
        unsigned int less;
        unsigned int skip_len = 0;

        while (sg && skip) {
                if (sg_dma_len(sg) <= skip) {
                        skip -= sg_dma_len(sg);
                        skip_len = 0;
                        sg = sg_next(sg);
                } else {
                        skip_len = skip;
                        skip = 0;
                }
        }

        while (sg && reqlen) {
                less = min(reqlen, sg_dma_len(sg) - skip_len);
                nents += DIV_ROUND_UP(less, entlen);
                reqlen -= less;
                skip_len = 0;
                sg = sg_next(sg);
        }
        return nents;
}

static inline int get_aead_subtype(struct crypto_aead *aead)
{
        struct aead_alg *alg = crypto_aead_alg(aead);
        struct chcr_alg_template *chcr_crypto_alg =
                container_of(alg, struct chcr_alg_template, alg.aead);
        return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

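/* Software verification of the authentication tag: compare the tag returned
 * by the hardware in the CPL_FW6_PLD completion against the expected tag and
 * report -EBADMSG through @err on mismatch.
 */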
void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
        u8 temp[SHA512_DIGEST_SIZE];
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        int authsize = crypto_aead_authsize(tfm);
        struct cpl_fw6_pld *fw6_pld;
        int cmp = 0;

        fw6_pld = (struct cpl_fw6_pld *)input;
        if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
            (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
                cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
        } else {
                sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
                                authsize, req->assoclen +
                                req->cryptlen - authsize);
                cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
        }
        if (cmp)
                *err = -EBADMSG;
        else
                *err = 0;
}

static int chcr_inc_wrcount(struct chcr_dev *dev)
{
        if (dev->state == CHCR_DETACH)
                return 1;
        atomic_inc(&dev->inflight);
        return 0;
}

static inline void chcr_dec_wrcount(struct chcr_dev *dev)
{
        atomic_dec(&dev->inflight);
}

static inline int chcr_handle_aead_resp(struct aead_request *req,
                                         unsigned char *input,
                                         int err)
{
        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct chcr_dev *dev = a_ctx(tfm)->dev;

        chcr_aead_common_exit(req);
        if (reqctx->verify == VERIFY_SW) {
                chcr_verify_tag(req, input, &err);
                reqctx->verify = VERIFY_HW;
        }
        chcr_dec_wrcount(dev);
        req->base.complete(&req->base, err);

        return err;
}

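/* Run the AES key expansion on @key and emit the last Nk round-key words in
 * reverse order into @dec_key.  This is the "reverse round key" material
 * the hardware consumes on the decrypt path (see generate_copy_rrkey()).
 * E.g. for AES-128 (Nk = 4, Nr = 10) the output is w[43], w[42], w[41],
 * w[40] of the standard key schedule.
 */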
static void get_aes_decrypt_key(unsigned char *dec_key,
                                       const unsigned char *key,
                                       unsigned int keylength)
{
        u32 temp;
        u32 w_ring[MAX_NK];
        int i, j, k;
        u8  nr, nk;

        switch (keylength) {
        case AES_KEYLENGTH_128BIT:
                nk = KEYLENGTH_4BYTES;
                nr = NUMBER_OF_ROUNDS_10;
                break;
        case AES_KEYLENGTH_192BIT:
                nk = KEYLENGTH_6BYTES;
                nr = NUMBER_OF_ROUNDS_12;
                break;
        case AES_KEYLENGTH_256BIT:
                nk = KEYLENGTH_8BYTES;
                nr = NUMBER_OF_ROUNDS_14;
                break;
        default:
                return;
        }
        for (i = 0; i < nk; i++)
                w_ring[i] = get_unaligned_be32(&key[i * 4]);

        i = 0;
        temp = w_ring[nk - 1];
        while (i + nk < (nr + 1) * 4) {
                if (!(i % nk)) {
                        /* RotWord(temp) */
                        temp = (temp << 8) | (temp >> 24);
                        temp = aes_ks_subword(temp);
                        temp ^= round_constant[i / nk];
                } else if (nk == 8 && (i % 4 == 0)) {
                        temp = aes_ks_subword(temp);
                }
                w_ring[i % nk] ^= temp;
                temp = w_ring[i % nk];
                i++;
        }
        i--;
        for (k = 0, j = i % nk; k < nk; k++) {
                put_unaligned_be32(w_ring[j], &dec_key[k * 4]);
                j--;
                if (j < 0)
                        j += nk;
        }
}

static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
        struct crypto_shash *base_hash = ERR_PTR(-EINVAL);

        switch (ds) {
        case SHA1_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha1", 0, 0);
                break;
        case SHA224_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha224", 0, 0);
                break;
        case SHA256_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha256", 0, 0);
                break;
        case SHA384_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha384", 0, 0);
                break;
        case SHA512_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha512", 0, 0);
                break;
        }

        return base_hash;
}

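/* Hash one block of keyed pad material (@iopad) and export the intermediate
 * state into @result_hash.  Used when precomputing the HMAC inner/outer
 * partial hashes.
 */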
static int chcr_compute_partial_hash(struct shash_desc *desc,
                                     char *iopad, char *result_hash,
                                     int digest_size)
{
        struct sha1_state sha1_st;
        struct sha256_state sha256_st;
        struct sha512_state sha512_st;
        int error;

        if (digest_size == SHA1_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha1_st);
                memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
        } else if (digest_size == SHA224_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha256_st);
                memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
        } else if (digest_size == SHA256_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha256_st);
                memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
        } else if (digest_size == SHA384_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha512_st);
                memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
        } else if (digest_size == SHA512_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha512_st);
                memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
        } else {
                error = -EINVAL;
                pr_err("Unknown digest size %d\n", digest_size);
        }
        return error;
}

static void chcr_change_order(char *buf, int ds)
{
        int i;

        if (ds == SHA512_DIGEST_SIZE) {
                for (i = 0; i < (ds / sizeof(u64)); i++)
                        *((__be64 *)buf + i) =
                                cpu_to_be64(*((u64 *)buf + i));
        } else {
                for (i = 0; i < (ds / sizeof(u32)); i++)
                        *((__be32 *)buf + i) =
                                cpu_to_be32(*((u32 *)buf + i));
        }
}

static inline int is_hmac(struct crypto_tfm *tfm)
{
        struct crypto_alg *alg = tfm->__crt_alg;
        struct chcr_alg_template *chcr_crypto_alg =
                container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
                             alg.hash);
        if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
                return 1;
        return 0;
}

static inline void dsgl_walk_init(struct dsgl_walk *walk,
                                   struct cpl_rx_phys_dsgl *dsgl)
{
        walk->dsgl = dsgl;
        walk->nents = 0;
        walk->to = (struct phys_sge_pairs *)(dsgl + 1);
}

static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
                                 int pci_chan_id)
{
        struct cpl_rx_phys_dsgl *phys_cpl;

        phys_cpl = walk->dsgl;

        phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
                                    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
        phys_cpl->pcirlxorder_to_noofsgentr =
                htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
                      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
                      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
                      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
                      CPL_RX_PHYS_DSGL_DCAID_V(0) |
                      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
        phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
        phys_cpl->rss_hdr_int.qid = htons(qid);
        phys_cpl->rss_hdr_int.hash_val = 0;
        phys_cpl->rss_hdr_int.channel = pci_chan_id;
}

static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
                                        size_t size,
                                        dma_addr_t addr)
{
        int j;

        if (!size)
                return;
        j = walk->nents;
        walk->to->len[j % 8] = htons(size);
        walk->to->addr[j % 8] = cpu_to_be64(addr);
        j++;
        if ((j % 8) == 0)
                walk->to++;
        walk->nents = j;
}

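/* Append a DMA-mapped scatterlist to the destination gather list, skipping
 * the first @skip bytes and adding at most @slen bytes.  Entries larger than
 * CHCR_DST_SG_SIZE are split; each phys_sge_pairs block holds eight
 * address/length pairs.
 */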
static void dsgl_walk_add_sg(struct dsgl_walk *walk,
                             struct scatterlist *sg,
                             unsigned int slen,
                             unsigned int skip)
{
        int skip_len = 0;
        unsigned int left_size = slen, len = 0;
        unsigned int j = walk->nents;
        int offset, ent_len;

        if (!slen)
                return;
        while (sg && skip) {
                if (sg_dma_len(sg) <= skip) {
                        skip -= sg_dma_len(sg);
                        skip_len = 0;
                        sg = sg_next(sg);
                } else {
                        skip_len = skip;
                        skip = 0;
                }
        }

        while (left_size && sg) {
                len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
                offset = 0;
                while (len) {
                        ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
                        walk->to->len[j % 8] = htons(ent_len);
                        walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
                                                      offset + skip_len);
                        offset += ent_len;
                        len -= ent_len;
                        j++;
                        if ((j % 8) == 0)
                                walk->to++;
                }
                walk->last_sg = sg;
                walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
                                          skip_len) + skip_len;
                left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
                skip_len = 0;
                sg = sg_next(sg);
        }
        walk->nents = j;
}

static inline void ulptx_walk_init(struct ulptx_walk *walk,
                                   struct ulptx_sgl *ulp)
{
        walk->sgl = ulp;
        walk->nents = 0;
        walk->pair_idx = 0;
        walk->pair = ulp->sge;
        walk->last_sg = NULL;
        walk->last_sg_len = 0;
}

static inline void ulptx_walk_end(struct ulptx_walk *walk)
{
        walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
                              ULPTX_NSGE_V(walk->nents));
}

static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
                                        size_t size,
                                        dma_addr_t addr)
{
        if (!size)
                return;

        if (walk->nents == 0) {
                walk->sgl->len0 = cpu_to_be32(size);
                walk->sgl->addr0 = cpu_to_be64(addr);
        } else {
                walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
                walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
                walk->pair_idx = !walk->pair_idx;
                if (!walk->pair_idx)
                        walk->pair++;
        }
        walk->nents++;
}

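/* Source-side counterpart of dsgl_walk_add_sg(): append a DMA-mapped
 * scatterlist to the ULPTX source gather list.  The first entry occupies the
 * inline len0/addr0 slot of the SGL header; subsequent entries are packed
 * two to an address/length pair, each split at CHCR_SRC_SG_SIZE.
 */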
static void ulptx_walk_add_sg(struct ulptx_walk *walk,
                              struct scatterlist *sg,
                              unsigned int len,
                              unsigned int skip)
{
        int small;
        int skip_len = 0;
        unsigned int sgmin;

        if (!len)
                return;
        while (sg && skip) {
                if (sg_dma_len(sg) <= skip) {
                        skip -= sg_dma_len(sg);
                        skip_len = 0;
                        sg = sg_next(sg);
                } else {
                        skip_len = skip;
                        skip = 0;
                }
        }
        WARN(!sg, "SG should not be null here\n");
        if (sg && (walk->nents == 0)) {
                small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
                sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
                walk->sgl->len0 = cpu_to_be32(sgmin);
                walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
                walk->nents++;
                len -= sgmin;
                walk->last_sg = sg;
                walk->last_sg_len = sgmin + skip_len;
                skip_len += sgmin;
                if (sg_dma_len(sg) == skip_len) {
                        sg = sg_next(sg);
                        skip_len = 0;
                }
        }

        while (sg && len) {
                small = min(sg_dma_len(sg) - skip_len, len);
                sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
                walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
                walk->pair->addr[walk->pair_idx] =
                        cpu_to_be64(sg_dma_address(sg) + skip_len);
                walk->pair_idx = !walk->pair_idx;
                walk->nents++;
                if (!walk->pair_idx)
                        walk->pair++;
                len -= sgmin;
                skip_len += sgmin;
                walk->last_sg = sg;
                walk->last_sg_len = skip_len;
                if (sg_dma_len(sg) == skip_len) {
                        sg = sg_next(sg);
                        skip_len = 0;
                }
        }
}

static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm)
{
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
        struct chcr_alg_template *chcr_crypto_alg =
                container_of(alg, struct chcr_alg_template, alg.skcipher);

        return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
        struct adapter *adap = netdev2adap(dev);
        struct sge_uld_txq_info *txq_info =
                adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
        struct sge_uld_txq *txq;
        int ret = 0;

        local_bh_disable();
        txq = &txq_info->uldtxq[idx];
        spin_lock(&txq->sendq.lock);
        if (txq->full)
                ret = -1;
        spin_unlock(&txq->sendq.lock);
        local_bh_enable();
        return ret;
}

static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
                               struct _key_ctx *key_ctx)
{
        if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
                memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
        } else {
                memcpy(key_ctx->key,
                       ablkctx->key + (ablkctx->enckey_len >> 1),
                       ablkctx->enckey_len >> 1);
                memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
                       ablkctx->rrkey, ablkctx->enckey_len >> 1);
        }
        return 0;
}

static int chcr_hash_ent_in_wr(struct scatterlist *src,
                               unsigned int minsg,
                               unsigned int space,
                               unsigned int srcskip)
{
        int srclen = 0;
        int srcsg = minsg;
        int soffset = 0, sless;

        if (sg_dma_len(src) == srcskip) {
                src = sg_next(src);
                srcskip = 0;
        }
        while (src && space > (sgl_ent_len[srcsg + 1])) {
                sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
                                                        CHCR_SRC_SG_SIZE);
                srclen += sless;
                soffset += sless;
                srcsg++;
                if (sg_dma_len(src) == (soffset + srcskip)) {
                        src = sg_next(src);
                        soffset = 0;
                        srcskip = 0;
                }
        }
        return srclen;
}

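/* Compute how many payload bytes of @src and @dst fit in the @space bytes
 * left in a work request, using the sgl_ent_len[]/dsgl_ent_len[] size tables
 * and assuming @minsg entries are already in use.  Returns the smaller of
 * the source and destination byte counts so both lists stay in step.
 */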
static int chcr_sg_ent_in_wr(struct scatterlist *src,
                             struct scatterlist *dst,
                             unsigned int minsg,
                             unsigned int space,
                             unsigned int srcskip,
                             unsigned int dstskip)
{
        int srclen = 0, dstlen = 0;
        int srcsg = minsg, dstsg = minsg;
        int offset = 0, soffset = 0, less, sless = 0;

        if (sg_dma_len(src) == srcskip) {
                src = sg_next(src);
                srcskip = 0;
        }
        if (sg_dma_len(dst) == dstskip) {
                dst = sg_next(dst);
                dstskip = 0;
        }

        while (src && dst &&
               space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
                sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
                                CHCR_SRC_SG_SIZE);
                srclen += sless;
                srcsg++;
                offset = 0;
                while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
                       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
                        if (srclen <= dstlen)
                                break;
                        less = min_t(unsigned int, sg_dma_len(dst) - offset -
                                     dstskip, CHCR_DST_SG_SIZE);
                        dstlen += less;
                        offset += less;
                        if ((offset + dstskip) == sg_dma_len(dst)) {
                                dst = sg_next(dst);
                                offset = 0;
                        }
                        dstsg++;
                        dstskip = 0;
                }
                soffset += sless;
                if ((soffset + srcskip) == sg_dma_len(src)) {
                        src = sg_next(src);
                        srcskip = 0;
                        soffset = 0;
                }
        }
        return min(srclen, dstlen);
}

static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
                                struct skcipher_request *req,
                                u8 *iv,
                                unsigned short op_type)
{
        struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
        int err;

        skcipher_request_set_tfm(&reqctx->fallback_req, cipher);
        skcipher_request_set_callback(&reqctx->fallback_req, req->base.flags,
                                      req->base.complete, req->base.data);
        skcipher_request_set_crypt(&reqctx->fallback_req, req->src, req->dst,
                                   req->cryptlen, iv);

        err = op_type ? crypto_skcipher_decrypt(&reqctx->fallback_req) :
                        crypto_skcipher_encrypt(&reqctx->fallback_req);

        return err;
}

static inline int get_qidxs(struct crypto_async_request *req,
                            unsigned int *txqidx, unsigned int *rxqidx)
{
        struct crypto_tfm *tfm = req->tfm;
        int ret = 0;

        switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_AEAD:
        {
                struct aead_request *aead_req =
                        container_of(req, struct aead_request, base);
                struct chcr_aead_reqctx *reqctx = aead_request_ctx(aead_req);
                *txqidx = reqctx->txqidx;
                *rxqidx = reqctx->rxqidx;
                break;
        }
        case CRYPTO_ALG_TYPE_SKCIPHER:
        {
                struct skcipher_request *sk_req =
                        container_of(req, struct skcipher_request, base);
                struct chcr_skcipher_req_ctx *reqctx =
                        skcipher_request_ctx(sk_req);
                *txqidx = reqctx->txqidx;
                *rxqidx = reqctx->rxqidx;
                break;
        }
        case CRYPTO_ALG_TYPE_AHASH:
        {
                struct ahash_request *ahash_req =
                        container_of(req, struct ahash_request, base);
                struct chcr_ahash_req_ctx *reqctx =
                        ahash_request_ctx(ahash_req);
                *txqidx = reqctx->txqidx;
                *rxqidx = reqctx->rxqidx;
                break;
        }
        default:
                ret = -EINVAL;
                /* should never get here */
                BUG();
                break;
        }
        return ret;
}

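/* Fill the FW_CRYPTO_LOOKASIDE_WR header fields common to all request types:
 * WR length, cookie (the crypto request pointer), rx/tx queue routing, and
 * the immediate-data length for the inner ULPTX/SEC_PDU commands.
 */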
static inline void create_wreq(struct chcr_context *ctx,
                               struct chcr_wr *chcr_req,
                               struct crypto_async_request *req,
                               unsigned int imm,
                               int hash_sz,
                               unsigned int len16,
                               unsigned int sc_len,
                               unsigned int lcb)
{
        struct uld_ctx *u_ctx = ULD_CTX(ctx);
        unsigned int tx_channel_id, rx_channel_id;
        unsigned int txqidx = 0, rxqidx = 0;
        unsigned int qid, fid;

        get_qidxs(req, &txqidx, &rxqidx);
        qid = u_ctx->lldi.rxq_ids[rxqidx];
        fid = u_ctx->lldi.rxq_ids[0];
        tx_channel_id = txqidx / ctx->txq_perchan;
        rx_channel_id = rxqidx / ctx->rxq_perchan;

        chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
        chcr_req->wreq.pld_size_hash_size =
                htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
        chcr_req->wreq.len16_pkd =
                htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
        chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
        chcr_req->wreq.rx_chid_to_rx_q_id = FILL_WR_RX_Q_ID(rx_channel_id, qid,
                                                            !!lcb, txqidx);

        chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(tx_channel_id, fid);
        chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
                                ((sizeof(chcr_req->wreq)) >> 4)));
        chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
        chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
                                           sizeof(chcr_req->key_ctx) + sc_len);
}

/**
 *      create_cipher_wr - form the WR for cipher operations
 *      @wrparam: Container for create_cipher_wr()'s parameters
 */
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
        struct chcr_context *ctx = c_ctx(tfm);
        struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
        struct sk_buff *skb = NULL;
        struct chcr_wr *chcr_req;
        struct cpl_rx_phys_dsgl *phys_cpl;
        struct ulptx_sgl *ulptx;
        struct chcr_skcipher_req_ctx *reqctx =
                skcipher_request_ctx(wrparam->req);
        unsigned int temp = 0, transhdr_len, dst_size;
        int error;
        int nents;
        unsigned int kctx_len;
        gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                        GFP_KERNEL : GFP_ATOMIC;
        struct adapter *adap = padap(ctx->dev);
        unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;

        nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
                              reqctx->dst_ofst);
        dst_size = get_space_for_phys_dsgl(nents);
        kctx_len = roundup(ablkctx->enckey_len, 16);
        transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
        nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
                                  CHCR_SRC_SG_SIZE, reqctx->src_ofst);
        temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
                                     (sgl_len(nents) * 8);
        transhdr_len += temp;
        transhdr_len = roundup(transhdr_len, 16);
        skb = alloc_skb(SGE_MAX_WR_LEN, flags);
        if (!skb) {
                error = -ENOMEM;
                goto err;
        }
        chcr_req = __skb_put_zero(skb, transhdr_len);
        chcr_req->sec_cpl.op_ivinsrtofst =
                        FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);

        chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
        chcr_req->sec_cpl.aadstart_cipherstop_hi =
                        FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);

        chcr_req->sec_cpl.cipherstop_lo_authinsert =
                        FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
        chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
                                                         ablkctx->ciph_mode,
                                                         0, 0, IV >> 1);
        chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
                                                          0, 1, dst_size);

        chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
        if ((reqctx->op == CHCR_DECRYPT_OP) &&
            (!(get_cryptoalg_subtype(tfm) ==
               CRYPTO_ALG_SUB_TYPE_CTR)) &&
            (!(get_cryptoalg_subtype(tfm) ==
               CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
                generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
        } else {
                if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
                    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
                        memcpy(chcr_req->key_ctx.key, ablkctx->key,
                               ablkctx->enckey_len);
                } else {
                        memcpy(chcr_req->key_ctx.key, ablkctx->key +
                               (ablkctx->enckey_len >> 1),
                               ablkctx->enckey_len >> 1);
                        memcpy(chcr_req->key_ctx.key +
                               (ablkctx->enckey_len >> 1),
                               ablkctx->key,
                               ablkctx->enckey_len >> 1);
                }
        }
        phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
        ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
        chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
        chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);

        atomic_inc(&adap->chcr_stats.cipher_rqst);
        temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
                + (reqctx->imm ? (wrparam->bytes) : 0);
        create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
                    transhdr_len, temp,
                        ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
        reqctx->skb = skb;

        if (reqctx->op && (ablkctx->ciph_mode ==
                           CHCR_SCMD_CIPHER_MODE_AES_CBC))
                sg_pcopy_to_buffer(wrparam->req->src,
                        sg_nents(wrparam->req->src), wrparam->req->iv, 16,
                        reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);

        return skb;
err:
        return ERR_PTR(error);
}

static inline int chcr_keyctx_ck_size(unsigned int keylen)
{
        int ck_size = 0;

        if (keylen == AES_KEYSIZE_128)
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
        else if (keylen == AES_KEYSIZE_192)
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
        else if (keylen == AES_KEYSIZE_256)
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
        else
                ck_size = 0;

        return ck_size;
}

static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
                                       const u8 *key,
                                       unsigned int keylen)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));

        crypto_skcipher_clear_flags(ablkctx->sw_cipher,
                                CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(ablkctx->sw_cipher,
                                cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
        return crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
}

static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
                               const u8 *key,
                               unsigned int keylen)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned int ck_size, context_size;
        u16 alignment = 0;
        int err;

        err = chcr_cipher_fallback_setkey(cipher, key, keylen);
        if (err)
                goto badkey_err;

        ck_size = chcr_keyctx_ck_size(keylen);
        alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
        memcpy(ablkctx->key, key, keylen);
        ablkctx->enckey_len = keylen;
        get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
                        keylen + alignment) >> 4;

        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
                                                0, 0, context_size);
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
        return 0;
badkey_err:
        ablkctx->enckey_len = 0;

        return err;
}

static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
                               const u8 *key,
                               unsigned int keylen)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned int ck_size, context_size;
        u16 alignment = 0;
        int err;

        err = chcr_cipher_fallback_setkey(cipher, key, keylen);
        if (err)
                goto badkey_err;
        ck_size = chcr_keyctx_ck_size(keylen);
        alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
        memcpy(ablkctx->key, key, keylen);
        ablkctx->enckey_len = keylen;
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
                        keylen + alignment) >> 4;

        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
                                                0, 0, context_size);
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

        return 0;
badkey_err:
        ablkctx->enckey_len = 0;

        return err;
}

static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
                                   const u8 *key,
                                   unsigned int keylen)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned int ck_size, context_size;
        u16 alignment = 0;
        int err;

        if (keylen < CTR_RFC3686_NONCE_SIZE)
                return -EINVAL;
        memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
               CTR_RFC3686_NONCE_SIZE);

        keylen -= CTR_RFC3686_NONCE_SIZE;
        err = chcr_cipher_fallback_setkey(cipher, key, keylen);
        if (err)
                goto badkey_err;

        ck_size = chcr_keyctx_ck_size(keylen);
        alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
        memcpy(ablkctx->key, key, keylen);
        ablkctx->enckey_len = keylen;
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
                        keylen + alignment) >> 4;

        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
                                                0, 0, context_size);
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

        return 0;
badkey_err:
        ablkctx->enckey_len = 0;

        return err;
}
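
/* Add @add to the 128-bit big-endian counter block in @srciv, propagating
 * any carry from the low 32-bit word upwards, and store the result in
 * @dstiv.  For example, adding 2 to a counter ending in 00 ff ff ff ff
 * yields one ending in 01 00 00 00 01.
 */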
static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
{
        unsigned int size = AES_BLOCK_SIZE;
        __be32 *b = (__be32 *)(dstiv + size);
        u32 c, prev;

        memcpy(dstiv, srciv, AES_BLOCK_SIZE);
        for (; size >= 4; size -= 4) {
                prev = be32_to_cpu(*--b);
                c = prev + add;
                *b = cpu_to_be32(c);
                if (prev < c)
                        break;
                add = 1;
        }
}

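/* Clamp @bytes so that the low 32-bit word of the CTR counter in @iv cannot
 * wrap within a single work request; the remainder is handled in a
 * follow-on request with an updated counter.
 */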
static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
{
        __be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
        u64 c;
        u32 temp = be32_to_cpu(*--b);

        temp = ~temp;
        /* number of blocks that can be processed without counter overflow */
        c = (u64)temp + 1;
        if ((bytes / AES_BLOCK_SIZE) >= c)
                bytes = c * AES_BLOCK_SIZE;
        return bytes;
}

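/* Advance the XTS tweak past the chunk just processed: encrypt the stored IV
 * with the tweak half of the key, multiply by x^(blocks in the last chunk)
 * in GF(2^128), and, unless this is the final chunk, decrypt it back
 * (presumably because the hardware re-encrypts the IV itself on the next
 * request).
 */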
static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
                             u32 isfinal)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
        struct crypto_aes_ctx aes;
        int ret, i;
        u8 *key;
        unsigned int keylen;
        int round = reqctx->last_req_len / AES_BLOCK_SIZE;
        int round8 = round / 8;

        memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);

        keylen = ablkctx->enckey_len / 2;
        key = ablkctx->key + keylen;
        /* For a 192-bit key, remove the padding zeroes that were
         * added in chcr_xts_setkey
         */
        if (KEY_CONTEXT_CK_SIZE_G(ntohl(ablkctx->key_ctx_hdr))
                        == CHCR_KEYCTX_CIPHER_KEY_SIZE_192)
                ret = aes_expandkey(&aes, key, keylen - 8);
        else
                ret = aes_expandkey(&aes, key, keylen);
        if (ret)
                return ret;
        aes_encrypt(&aes, iv, iv);
        for (i = 0; i < round8; i++)
                gf128mul_x8_ble((le128 *)iv, (le128 *)iv);

        for (i = 0; i < (round % 8); i++)
                gf128mul_x_ble((le128 *)iv, (le128 *)iv);

        if (!isfinal)
                aes_decrypt(&aes, iv, iv);

        memzero_explicit(&aes, sizeof(aes));
        return 0;
}

static int chcr_update_cipher_iv(struct skcipher_request *req,
                                   struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
        int subtype = get_cryptoalg_subtype(tfm);
        int ret = 0;

        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
                ctr_add_iv(iv, req->iv, (reqctx->processed /
                           AES_BLOCK_SIZE));
        else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
                *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
                        CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
                                                AES_BLOCK_SIZE) + 1);
        else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
                ret = chcr_update_tweak(req, iv, 0);
        else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
                if (reqctx->op)
                        /* Updated before sending last WR */
                        memcpy(iv, req->iv, AES_BLOCK_SIZE);
                else
                        memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
        }

        return ret;
}

/* We need a separate function for the final IV because for RFC3686 the
 * initial counter starts from 1 and the IV buffer is only 8 bytes, which
 * remains constant across subsequent update requests.
 */
static int chcr_final_cipher_iv(struct skcipher_request *req,
                                   struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
        int subtype = get_cryptoalg_subtype(tfm);
        int ret = 0;

        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
                ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
                                                       AES_BLOCK_SIZE));
        else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) {
                if (!reqctx->partial_req)
                        memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
                else
                        ret = chcr_update_tweak(req, iv, 1);
        } else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
                /* Already updated for decrypt */
                if (!reqctx->op)
                        memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
        }
        return ret;
}

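/* Completion handler for cipher work requests.  Either the whole request has
 * been processed, in which case the final IV is derived and the request
 * completed, or the next chunk is carved out of the remaining payload, the
 * IV is advanced, and a follow-on WR is posted.  A zero-length chunk means
 * the hardware path cannot make progress, so the software fallback is used.
 */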
static int chcr_handle_cipher_resp(struct skcipher_request *req,
                                   unsigned char *input, int err)
{
        struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
        struct chcr_dev *dev = c_ctx(tfm)->dev;
        struct chcr_context *ctx = c_ctx(tfm);
        struct adapter *adap = padap(ctx->dev);
        struct cipher_wr_param wrparam;
        struct sk_buff *skb;
        int bytes;

        if (err)
                goto unmap;
        if (req->cryptlen == reqctx->processed) {
                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                      req);
                err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
                goto complete;
        }

        if (!reqctx->imm) {
                bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
                                          CIP_SPACE_LEFT(ablkctx->enckey_len),
                                          reqctx->src_ofst, reqctx->dst_ofst);
                if ((bytes + reqctx->processed) >= req->cryptlen)
                        bytes = req->cryptlen - reqctx->processed;
                else
                        bytes = rounddown(bytes, 16);
        } else {
                /* CTR mode counter overflow */
                bytes = req->cryptlen - reqctx->processed;
        }
        err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
        if (err)
                goto unmap;

        if (unlikely(bytes == 0)) {
                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                      req);
                memcpy(req->iv, reqctx->init_iv, IV);
                atomic_inc(&adap->chcr_stats.fallback);
                err = chcr_cipher_fallback(ablkctx->sw_cipher, req, req->iv,
                                           reqctx->op);
                goto complete;
        }

        if (get_cryptoalg_subtype(tfm) ==
            CRYPTO_ALG_SUB_TYPE_CTR)
                bytes = adjust_ctr_overflow(reqctx->iv, bytes);
        wrparam.qid = u_ctx->lldi.rxq_ids[reqctx->rxqidx];
        wrparam.req = req;
        wrparam.bytes = bytes;
        skb = create_cipher_wr(&wrparam);
        if (IS_ERR(skb)) {
                pr_err("%s: failed to form WR, no memory\n", __func__);
                err = PTR_ERR(skb);
                goto unmap;
        }
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
        chcr_send_wr(skb);
        reqctx->last_req_len = bytes;
        reqctx->processed += bytes;
        if (get_cryptoalg_subtype(tfm) ==
                CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
                        CRYPTO_TFM_REQ_MAY_SLEEP) {
                complete(&ctx->cbc_aes_aio_done);
        }
        return 0;
unmap:
        chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
        if (get_cryptoalg_subtype(tfm) ==
                CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
                        CRYPTO_TFM_REQ_MAY_SLEEP) {
                complete(&ctx->cbc_aes_aio_done);
        }
        chcr_dec_wrcount(dev);
        req->base.complete(&req->base, err);
        return err;
}

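/* Prepare and submit the first work request for a cipher operation:
 * validate key/IV/length, DMA-map the request, decide between immediate
 * data and gather-list mode, set up the (possibly RFC3686-wrapped) IV, and
 * fall back to the software cipher for lengths the hardware cannot handle.
 */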
1244 static int process_cipher(struct skcipher_request *req,
1245                                   unsigned short qid,
1246                                   struct sk_buff **skb,
1247                                   unsigned short op_type)
1248 {
1249         struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1250         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1251         unsigned int ivsize = crypto_skcipher_ivsize(tfm);
1252         struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1253         struct adapter *adap = padap(c_ctx(tfm)->dev);
1254         struct  cipher_wr_param wrparam;
1255         int bytes, err = -EINVAL;
1256         int subtype;
1257
1258         reqctx->processed = 0;
1259         reqctx->partial_req = 0;
1260         if (!req->iv)
1261                 goto error;
1262         subtype = get_cryptoalg_subtype(tfm);
1263         if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
1264             (req->cryptlen == 0) ||
1265             (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
1266                 if (req->cryptlen == 0 && subtype != CRYPTO_ALG_SUB_TYPE_XTS)
1267                         goto fallback;
1268                 else if (req->cryptlen % crypto_skcipher_blocksize(tfm) &&
1269                          subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1270                         goto fallback;
1271                 pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
1272                        ablkctx->enckey_len, req->cryptlen, ivsize);
1273                 goto error;
1274         }
1275
1276         err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1277         if (err)
1278                 goto error;
1279         if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
1280                                             AES_MIN_KEY_SIZE +
1281                                             sizeof(struct cpl_rx_phys_dsgl) +
1282                                         /*Min dsgl size*/
1283                                             32))) {
1284                 /* Can be sent as Imm*/
1285                 unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
1286
1287                 dnents = sg_nents_xlen(req->dst, req->cryptlen,
1288                                        CHCR_DST_SG_SIZE, 0);
1289                 phys_dsgl = get_space_for_phys_dsgl(dnents);
1290                 kctx_len = roundup(ablkctx->enckey_len, 16);
1291                 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
1292                 reqctx->imm = (transhdr_len + IV + req->cryptlen) <=
1293                         SGE_MAX_WR_LEN;
1294                 bytes = IV + req->cryptlen;
1295
1296         } else {
1297                 reqctx->imm = 0;
1298         }
1299
1300         if (!reqctx->imm) {
1301                 bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
1302                                           CIP_SPACE_LEFT(ablkctx->enckey_len),
1303                                           0, 0);
1304                 if ((bytes + reqctx->processed) >= req->cryptlen)
1305                         bytes  = req->cryptlen - reqctx->processed;
1306                 else
1307                         bytes = rounddown(bytes, 16);
1308         } else {
1309                 bytes = req->cryptlen;
1310         }
1311         if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) {
1312                 bytes = adjust_ctr_overflow(req->iv, bytes);
1313         }
1314         if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
1315                 memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
1316                 memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
1317                                 CTR_RFC3686_IV_SIZE);
1318
1319                 /* initialize counter portion of counter block */
1320                 *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1321                         CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1322                 memcpy(reqctx->init_iv, reqctx->iv, IV);
1323
1324         } else {
1325
1326                 memcpy(reqctx->iv, req->iv, IV);
1327                 memcpy(reqctx->init_iv, req->iv, IV);
1328         }
1329         if (unlikely(bytes == 0)) {
1330                 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1331                                       req);
1332 fallback:       atomic_inc(&adap->chcr_stats.fallback);
1333                 err = chcr_cipher_fallback(ablkctx->sw_cipher, req,
1334                                            subtype ==
1335                                            CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
1336                                            reqctx->iv : req->iv,
1337                                            op_type);
1338                 goto error;
1339         }
1340         reqctx->op = op_type;
1341         reqctx->srcsg = req->src;
1342         reqctx->dstsg = req->dst;
1343         reqctx->src_ofst = 0;
1344         reqctx->dst_ofst = 0;
1345         wrparam.qid = qid;
1346         wrparam.req = req;
1347         wrparam.bytes = bytes;
1348         *skb = create_cipher_wr(&wrparam);
1349         if (IS_ERR(*skb)) {
1350                 err = PTR_ERR(*skb);
1351                 goto unmap;
1352         }
1353         reqctx->processed = bytes;
1354         reqctx->last_req_len = bytes;
1355         reqctx->partial_req = !!(req->cryptlen - reqctx->processed);
1356
1357         return 0;
1358 unmap:
1359         chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1360 error:
1361         return err;
1362 }
1363
1364 static int chcr_aes_encrypt(struct skcipher_request *req)
1365 {
1366         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1367         struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1368         struct chcr_dev *dev = c_ctx(tfm)->dev;
1369         struct sk_buff *skb = NULL;
1370         int err;
1371         struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1372         struct chcr_context *ctx = c_ctx(tfm);
1373         unsigned int cpu;
1374
1375         cpu = get_cpu();
1376         reqctx->txqidx = cpu % ctx->ntxq;
1377         reqctx->rxqidx = cpu % ctx->nrxq;
1378         put_cpu();
1379
1380         err = chcr_inc_wrcount(dev);
1381         if (err)
1382                 return -ENXIO;
1383         if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1384                                             reqctx->txqidx) &&
1385                      !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) {
1386                 err = -ENOSPC;
1387                 goto error;
1388         }
1389
1390         err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
1391                              &skb, CHCR_ENCRYPT_OP);
1392         if (err || !skb)
1393                 return err;
1394         skb->dev = u_ctx->lldi.ports[0];
1395         set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1396         chcr_send_wr(skb);
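        /* For CBC requests from a MAY_SLEEP-only caller, wait for the AIO
         * completion so chained partial requests finish in order;
         * cbc_aes_aio_done is presumably completed from the cipher response
         * path.
         */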
1397         if (get_cryptoalg_subtype(tfm) ==
1398             CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
1399             CRYPTO_TFM_REQ_MAY_SLEEP) {
1400                 reqctx->partial_req = 1;
1401                 wait_for_completion(&ctx->cbc_aes_aio_done);
1402         }
1403         return -EINPROGRESS;
1404 error:
1405         chcr_dec_wrcount(dev);
1406         return err;
1407 }
1408
1409 static int chcr_aes_decrypt(struct skcipher_request *req)
1410 {
1411         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1412         struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1413         struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1414         struct chcr_dev *dev = c_ctx(tfm)->dev;
1415         struct sk_buff *skb = NULL;
1416         int err;
1417         struct chcr_context *ctx = c_ctx(tfm);
1418         unsigned int cpu;
1419
1420         cpu = get_cpu();
1421         reqctx->txqidx = cpu % ctx->ntxq;
1422         reqctx->rxqidx = cpu % ctx->nrxq;
1423         put_cpu();
1424
1425         err = chcr_inc_wrcount(dev);
1426         if (err)
1427                 return -ENXIO;
1428
1429         if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1430                                             reqctx->txqidx) &&
1431                      !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
1432                 return -ENOSPC;
1433         err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
1434                              &skb, CHCR_DECRYPT_OP);
1435         if (err || !skb)
1436                 return err;
1437         skb->dev = u_ctx->lldi.ports[0];
1438         set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1439         chcr_send_wr(skb);
1440         return -EINPROGRESS;
1441 }

1442 static int chcr_device_init(struct chcr_context *ctx)
1443 {
1444         struct uld_ctx *u_ctx = NULL;
1445         int txq_perchan, ntxq;
1446         int err = 0, rxq_perchan;
1447
1448         if (!ctx->dev) {
1449                 u_ctx = assign_chcr_device();
1450                 if (!u_ctx) {
1451                         err = -ENXIO;
1452                         pr_err("chcr device assignment failed\n");
1453                         goto out;
1454                 }
1455                 ctx->dev = &u_ctx->dev;
1456                 ntxq = u_ctx->lldi.ntxq;
1457                 rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
1458                 txq_perchan = ntxq / u_ctx->lldi.nchan;
1459                 ctx->ntxq = ntxq;
1460                 ctx->nrxq = u_ctx->lldi.nrxq;
1461                 ctx->rxq_perchan = rxq_perchan;
1462                 ctx->txq_perchan = txq_perchan;
1463         }
1464 out:
1465         return err;
1466 }
1467
1468 static int chcr_init_tfm(struct crypto_skcipher *tfm)
1469 {
1470         struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1471         struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1472         struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1473
1474         ablkctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0,
1475                                 CRYPTO_ALG_NEED_FALLBACK);
1476         if (IS_ERR(ablkctx->sw_cipher)) {
1477                 pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
1478                 return PTR_ERR(ablkctx->sw_cipher);
1479         }
1480         init_completion(&ctx->cbc_aes_aio_done);
1481         crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
1482                                          crypto_skcipher_reqsize(ablkctx->sw_cipher));
1483
1484         return chcr_device_init(ctx);
1485 }
1486
1487 static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
1488 {
1489         struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1490         struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1491         struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1492
1493         /* RFC 3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
1494          * cannot be used as the fallback in chcr_handle_cipher_response()
1495          */
1496         ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
1497                                 CRYPTO_ALG_NEED_FALLBACK);
1498         if (IS_ERR(ablkctx->sw_cipher)) {
1499                 pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
1500                 return PTR_ERR(ablkctx->sw_cipher);
1501         }
1502         crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
1503                                     crypto_skcipher_reqsize(ablkctx->sw_cipher));
1504         return chcr_device_init(ctx);
1505 }
1506
1508 static void chcr_exit_tfm(struct crypto_skcipher *tfm)
1509 {
1510         struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1511         struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1512
1513         crypto_free_skcipher(ablkctx->sw_cipher);
1514 }
1515
1516 static int get_alg_config(struct algo_param *params,
1517                           unsigned int auth_size)
1518 {
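        /* Note: for the truncated digests (SHA-224/384), result_size is the
         * size of the parent algorithm's state (SHA-256/512); the hardware
         * carries the full partial-hash state, which is only truncated when
         * the final digest is copied out.
         */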
1519         switch (auth_size) {
1520         case SHA1_DIGEST_SIZE:
1521                 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
1522                 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
1523                 params->result_size = SHA1_DIGEST_SIZE;
1524                 break;
1525         case SHA224_DIGEST_SIZE:
1526                 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1527                 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
1528                 params->result_size = SHA256_DIGEST_SIZE;
1529                 break;
1530         case SHA256_DIGEST_SIZE:
1531                 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1532                 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
1533                 params->result_size = SHA256_DIGEST_SIZE;
1534                 break;
1535         case SHA384_DIGEST_SIZE:
1536                 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1537                 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
1538                 params->result_size = SHA512_DIGEST_SIZE;
1539                 break;
1540         case SHA512_DIGEST_SIZE:
1541                 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1542                 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
1543                 params->result_size = SHA512_DIGEST_SIZE;
1544                 break;
1545         default:
1546                 pr_err("unsupported digest size\n");
1547                 return -EINVAL;
1548         }
1549         return 0;
1550 }
1551
1552 static inline void chcr_free_shash(struct crypto_shash *base_hash)
1553 {
1554         crypto_free_shash(base_hash);
1555 }
1556
1557 /**
1558  *      create_hash_wr - Create hash work request
1559  *      @req: Hash request base
1560  *      @param: Container for create_hash_wr()'s parameters
1561  */
1562 static struct sk_buff *create_hash_wr(struct ahash_request *req,
1563                                       struct hash_wr_param *param)
1564 {
1565         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1566         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1567         struct chcr_context *ctx = h_ctx(tfm);
1568         struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1569         struct sk_buff *skb = NULL;
1570         struct uld_ctx *u_ctx = ULD_CTX(ctx);
1571         struct chcr_wr *chcr_req;
1572         struct ulptx_sgl *ulptx;
1573         unsigned int nents = 0, transhdr_len;
1574         unsigned int temp = 0;
1575         gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1576                 GFP_ATOMIC;
1577         struct adapter *adap = padap(h_ctx(tfm)->dev);
1578         int error = 0;
1579         unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;
1580
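        /* Decide between immediate data and a ULPTX SGL: if the transport
         * header plus the buffered bytes and SG payload fit within one work
         * request, everything is inlined; otherwise the WR is sized for an
         * SGL of the source entries.
         */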
1581         transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
1582         req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
1583                                 param->sg_len) <= SGE_MAX_WR_LEN;
1584         nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
1585                       CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
1586         nents += param->bfr_len ? 1 : 0;
1587         transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
1588                                 param->sg_len, 16) : (sgl_len(nents) * 8);
1589         transhdr_len = roundup(transhdr_len, 16);
1590
1591         skb = alloc_skb(transhdr_len, flags);
1592         if (!skb)
1593                 return ERR_PTR(-ENOMEM);
1594         chcr_req = __skb_put_zero(skb, transhdr_len);
1595
1596         chcr_req->sec_cpl.op_ivinsrtofst =
1597                 FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 0);
1598
1599         chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
1600
1601         chcr_req->sec_cpl.aadstart_cipherstop_hi =
1602                 FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
1603         chcr_req->sec_cpl.cipherstop_lo_authinsert =
1604                 FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
1605         chcr_req->sec_cpl.seqno_numivs =
1606                 FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
1607                                          param->opad_needed, 0);
1608
1609         chcr_req->sec_cpl.ivgen_hdrlen =
1610                 FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
1611
1612         memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
1613                param->alg_prm.result_size);
1614
1615         if (param->opad_needed)
1616                 memcpy(chcr_req->key_ctx.key +
1617                        ((param->alg_prm.result_size <= 32) ? 32 :
1618                         CHCR_HASH_MAX_DIGEST_SIZE),
1619                        hmacctx->opad, param->alg_prm.result_size);
1620
1621         chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
1622                                             param->alg_prm.mk_size, 0,
1623                                             param->opad_needed,
1624                                             ((param->kctx_len +
1625                                              sizeof(chcr_req->key_ctx)) >> 4));
1626         chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
1627         ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
1628                                      DUMMY_BYTES);
1629         if (param->bfr_len != 0) {
1630                 req_ctx->hctx_wr.dma_addr =
1631                         dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
1632                                        param->bfr_len, DMA_TO_DEVICE);
1633                 if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
1634                                        req_ctx->hctx_wr.dma_addr)) {
1635                         error = -ENOMEM;
1636                         goto err;
1637                 }
1638                 req_ctx->hctx_wr.dma_len = param->bfr_len;
1639         } else {
1640                 req_ctx->hctx_wr.dma_addr = 0;
1641         }
1642         chcr_add_hash_src_ent(req, ulptx, param);
1643         /* Request up to max WR size */
1644         temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
1645                                 (param->sg_len + param->bfr_len) : 0);
1646         atomic_inc(&adap->chcr_stats.digest_rqst);
1647         create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
1648                     param->hash_size, transhdr_len,
1649                     temp, 0);
1650         req_ctx->hctx_wr.skb = skb;
1651         return skb;
1652 err:
1653         kfree_skb(skb);
1654         return ERR_PTR(error);
1655 }
1656
1657 static int chcr_ahash_update(struct ahash_request *req)
1658 {
1659         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1660         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1661         struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1662         struct chcr_context *ctx = h_ctx(rtfm);
1663         struct chcr_dev *dev = h_ctx(rtfm)->dev;
1664         struct sk_buff *skb;
1665         u8 remainder = 0, bs;
1666         unsigned int nbytes = req->nbytes;
1667         struct hash_wr_param params;
1668         int error;
1669         unsigned int cpu;
1670
1671         cpu = get_cpu();
1672         req_ctx->txqidx = cpu % ctx->ntxq;
1673         req_ctx->rxqidx = cpu % ctx->nrxq;
1674         put_cpu();
1675
1676         bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1677
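        /* Buffer sub-block data: if the total is still shorter than one
         * block, just stash it in reqbfr and return; otherwise hash the
         * largest block-size multiple now and carry the remainder over.
         */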
1678         if (nbytes + req_ctx->reqlen >= bs) {
1679                 remainder = (nbytes + req_ctx->reqlen) % bs;
1680                 nbytes = nbytes + req_ctx->reqlen - remainder;
1681         } else {
1682                 sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
1683                                    + req_ctx->reqlen, nbytes, 0);
1684                 req_ctx->reqlen += nbytes;
1685                 return 0;
1686         }
1687         error = chcr_inc_wrcount(dev);
1688         if (error)
1689                 return -ENXIO;
1690         /* Detached state for CHCR means lldi or padap may be freed; holding
1691          * an inflight count on the dev guarantees they remain valid.
1692          */
1693         if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1694                                             req_ctx->txqidx) &&
1695                      !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) {
1696                 error = -ENOSPC;
1697                 goto err;
1698         }
1699
1700         chcr_init_hctx_per_wr(req_ctx);
1701         error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1702         if (error) {
1703                 error = -ENOMEM;
1704                 goto err;
1705         }
1706         get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1707         params.kctx_len = roundup(params.alg_prm.result_size, 16);
1708         params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1709                                      HASH_SPACE_LEFT(params.kctx_len), 0);
1710         if (params.sg_len > req->nbytes)
1711                 params.sg_len = req->nbytes;
1712         params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
1713                         req_ctx->reqlen;
1714         params.opad_needed = 0;
1715         params.more = 1;
1716         params.last = 0;
1717         params.bfr_len = req_ctx->reqlen;
1718         params.scmd1 = 0;
1719         req_ctx->hctx_wr.srcsg = req->src;
1720
1721         params.hash_size = params.alg_prm.result_size;
1722         req_ctx->data_len += params.sg_len + params.bfr_len;
1723         skb = create_hash_wr(req, &params);
1724         if (IS_ERR(skb)) {
1725                 error = PTR_ERR(skb);
1726                 goto unmap;
1727         }
1728
1729         req_ctx->hctx_wr.processed += params.sg_len;
1730         if (remainder) {
1731                 /* Swap buffers */
1732                 swap(req_ctx->reqbfr, req_ctx->skbfr);
1733                 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
1734                                    req_ctx->reqbfr, remainder, req->nbytes -
1735                                    remainder);
1736         }
1737         req_ctx->reqlen = remainder;
1738         skb->dev = u_ctx->lldi.ports[0];
1739         set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1740         chcr_send_wr(skb);
1741         return -EINPROGRESS;
1742 unmap:
1743         chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1744 err:
1745         chcr_dec_wrcount(dev);
1746         return error;
1747 }
1748
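/* Hand-build the final padding block of a Merkle-Damgard hash: a 0x80 byte,
 * zeros, then the message length in bits (scmd1 << 3) stored big-endian in
 * the last 8 bytes; offset 56 for 64-byte blocks (SHA-1/224/256), offset 120
 * for 128-byte blocks (SHA-384/512).
 */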
1749 static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
1750 {
1751         memset(bfr_ptr, 0, bs);
1752         *bfr_ptr = 0x80;
1753         if (bs == 64)
1754                 *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
1755         else
1756                 *(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
1757 }
1758
1759 static int chcr_ahash_final(struct ahash_request *req)
1760 {
1761         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1762         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1763         struct chcr_dev *dev = h_ctx(rtfm)->dev;
1764         struct hash_wr_param params;
1765         struct sk_buff *skb;
1766         struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1767         struct chcr_context *ctx = h_ctx(rtfm);
1768         u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1769         int error;
1770         unsigned int cpu;
1771
1772         cpu = get_cpu();
1773         req_ctx->txqidx = cpu % ctx->ntxq;
1774         req_ctx->rxqidx = cpu % ctx->nrxq;
1775         put_cpu();
1776
1777         error = chcr_inc_wrcount(dev);
1778         if (error)
1779                 return -ENXIO;
1780
1781         chcr_init_hctx_per_wr(req_ctx);
1782         if (is_hmac(crypto_ahash_tfm(rtfm)))
1783                 params.opad_needed = 1;
1784         else
1785                 params.opad_needed = 0;
1786         params.sg_len = 0;
1787         req_ctx->hctx_wr.isfinal = 1;
1788         get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1789         params.kctx_len = roundup(params.alg_prm.result_size, 16);
1790         if (is_hmac(crypto_ahash_tfm(rtfm))) {
1791                 params.opad_needed = 1;
1792                 params.kctx_len *= 2;
1793         } else {
1794                 params.opad_needed = 0;
1795         }
1796
1797         req_ctx->hctx_wr.result = 1;
1798         params.bfr_len = req_ctx->reqlen;
1799         req_ctx->data_len += params.bfr_len + params.sg_len;
1800         req_ctx->hctx_wr.srcsg = req->src;
1801         if (req_ctx->reqlen == 0) {
1802                 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1803                 params.last = 0;
1804                 params.more = 1;
1805                 params.scmd1 = 0;
1806                 params.bfr_len = bs;
1808         } else {
1809                 params.scmd1 = req_ctx->data_len;
1810                 params.last = 1;
1811                 params.more = 0;
1812         }
1813         params.hash_size = crypto_ahash_digestsize(rtfm);
1814         skb = create_hash_wr(req, &params);
1815         if (IS_ERR(skb)) {
1816                 error = PTR_ERR(skb);
1817                 goto err;
1818         }
1819         req_ctx->reqlen = 0;
1820         skb->dev = u_ctx->lldi.ports[0];
1821         set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1822         chcr_send_wr(skb);
1823         return -EINPROGRESS;
1824 err:
1825         chcr_dec_wrcount(dev);
1826         return error;
1827 }
1828
1829 static int chcr_ahash_finup(struct ahash_request *req)
1830 {
1831         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1832         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1833         struct chcr_dev *dev = h_ctx(rtfm)->dev;
1834         struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1835         struct chcr_context *ctx = h_ctx(rtfm);
1836         struct sk_buff *skb;
1837         struct hash_wr_param params;
1838         u8  bs;
1839         int error;
1840         unsigned int cpu;
1841
1842         cpu = get_cpu();
1843         req_ctx->txqidx = cpu % ctx->ntxq;
1844         req_ctx->rxqidx = cpu % ctx->nrxq;
1845         put_cpu();
1846
1847         bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1848         error = chcr_inc_wrcount(dev);
1849         if (error)
1850                 return -ENXIO;
1851
1852         if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1853                                             req_ctx->txqidx) &&
1854                      !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) {
1855                 error = -ENOSPC;
1856                 goto err;
1857         }
1858         chcr_init_hctx_per_wr(req_ctx);
1859         error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1860         if (error) {
1861                 error = -ENOMEM;
1862                 goto err;
1863         }
1864
1865         get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1866         params.kctx_len = roundup(params.alg_prm.result_size, 16);
1867         if (is_hmac(crypto_ahash_tfm(rtfm))) {
1868                 params.kctx_len *= 2;
1869                 params.opad_needed = 1;
1870         } else {
1871                 params.opad_needed = 0;
1872         }
1873
1874         params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1875                                     HASH_SPACE_LEFT(params.kctx_len), 0);
1876         if (params.sg_len < req->nbytes) {
1877                 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1878                         params.kctx_len /= 2;
1879                         params.opad_needed = 0;
1880                 }
1881                 params.last = 0;
1882                 params.more = 1;
1883                 params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
1884                                         - req_ctx->reqlen;
1885                 params.hash_size = params.alg_prm.result_size;
1886                 params.scmd1 = 0;
1887         } else {
1888                 params.last = 1;
1889                 params.more = 0;
1890                 params.sg_len = req->nbytes;
1891                 params.hash_size = crypto_ahash_digestsize(rtfm);
1892                 params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
1893                                 params.sg_len;
1894         }
1895         params.bfr_len = req_ctx->reqlen;
1896         req_ctx->data_len += params.bfr_len + params.sg_len;
1897         req_ctx->hctx_wr.result = 1;
1898         req_ctx->hctx_wr.srcsg = req->src;
1899         if ((req_ctx->reqlen + req->nbytes) == 0) {
1900                 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1901                 params.last = 0;
1902                 params.more = 1;
1903                 params.scmd1 = 0;
1904                 params.bfr_len = bs;
1905         }
1906         skb = create_hash_wr(req, &params);
1907         if (IS_ERR(skb)) {
1908                 error = PTR_ERR(skb);
1909                 goto unmap;
1910         }
1911         req_ctx->reqlen = 0;
1912         req_ctx->hctx_wr.processed += params.sg_len;
1913         skb->dev = u_ctx->lldi.ports[0];
1914         set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1915         chcr_send_wr(skb);
1916         return -EINPROGRESS;
1917 unmap:
1918         chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1919 err:
1920         chcr_dec_wrcount(dev);
1921         return error;
1922 }
1923
1924 static int chcr_ahash_digest(struct ahash_request *req)
1925 {
1926         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1927         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1928         struct chcr_dev *dev = h_ctx(rtfm)->dev;
1929         struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1930         struct chcr_context *ctx = h_ctx(rtfm);
1931         struct sk_buff *skb;
1932         struct hash_wr_param params;
1933         u8  bs;
1934         int error;
1935         unsigned int cpu;
1936
1937         cpu = get_cpu();
1938         req_ctx->txqidx = cpu % ctx->ntxq;
1939         req_ctx->rxqidx = cpu % ctx->nrxq;
1940         put_cpu();
1941
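        /* digest() is a one-shot operation, so reset the hash state through
         * the tfm's own ->init() before building the work request.
         */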
1942         rtfm->init(req);
1943         bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1944         error = chcr_inc_wrcount(dev);
1945         if (error)
1946                 return -ENXIO;
1947
1948         if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1949                                             req_ctx->txqidx) &&
1950                      !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) {
1951                 error = -ENOSPC;
1952                 goto err;
1953         }
1954
1955         chcr_init_hctx_per_wr(req_ctx);
1956         error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1957         if (error) {
1958                 error = -ENOMEM;
1959                 goto err;
1960         }
1961
1962         get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1963         params.kctx_len = roundup(params.alg_prm.result_size, 16);
1964         if (is_hmac(crypto_ahash_tfm(rtfm))) {
1965                 params.kctx_len *= 2;
1966                 params.opad_needed = 1;
1967         } else {
1968                 params.opad_needed = 0;
1969         }
1970         params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1971                                 HASH_SPACE_LEFT(params.kctx_len), 0);
1972         if (params.sg_len < req->nbytes) {
1973                 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1974                         params.kctx_len /= 2;
1975                         params.opad_needed = 0;
1976                 }
1977                 params.last = 0;
1978                 params.more = 1;
1979                 params.scmd1 = 0;
1980                 params.sg_len = rounddown(params.sg_len, bs);
1981                 params.hash_size = params.alg_prm.result_size;
1982         } else {
1983                 params.sg_len = req->nbytes;
1984                 params.hash_size = crypto_ahash_digestsize(rtfm);
1985                 params.last = 1;
1986                 params.more = 0;
1987                 params.scmd1 = req->nbytes + req_ctx->data_len;
1989         }
1990         params.bfr_len = 0;
1991         req_ctx->hctx_wr.result = 1;
1992         req_ctx->hctx_wr.srcsg = req->src;
1993         req_ctx->data_len += params.bfr_len + params.sg_len;
1994
1995         if (req->nbytes == 0) {
1996                 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1997                 params.more = 1;
1998                 params.bfr_len = bs;
1999         }
2000
2001         skb = create_hash_wr(req, &params);
2002         if (IS_ERR(skb)) {
2003                 error = PTR_ERR(skb);
2004                 goto unmap;
2005         }
2006         req_ctx->hctx_wr.processed += params.sg_len;
2007         skb->dev = u_ctx->lldi.ports[0];
2008         set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
2009         chcr_send_wr(skb);
2010         return -EINPROGRESS;
2011 unmap:
2012         chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2013 err:
2014         chcr_dec_wrcount(dev);
2015         return error;
2016 }
2017
2018 static int chcr_ahash_continue(struct ahash_request *req)
2019 {
2020         struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2021         struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2022         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
2023         struct chcr_context *ctx = h_ctx(rtfm);
2024         struct uld_ctx *u_ctx = ULD_CTX(ctx);
2025         struct sk_buff *skb;
2026         struct hash_wr_param params;
2027         u8  bs;
2028         int error;
2029         unsigned int cpu;
2030
2031         cpu = get_cpu();
2032         reqctx->txqidx = cpu % ctx->ntxq;
2033         reqctx->rxqidx = cpu % ctx->nrxq;
2034         put_cpu();
2035
2036         bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2037         get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
2038         params.kctx_len = roundup(params.alg_prm.result_size, 16);
2039         if (is_hmac(crypto_ahash_tfm(rtfm))) {
2040                 params.kctx_len *= 2;
2041                 params.opad_needed = 1;
2042         } else {
2043                 params.opad_needed = 0;
2044         }
2045         params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
2046                                             HASH_SPACE_LEFT(params.kctx_len),
2047                                             hctx_wr->src_ofst);
2048         if ((params.sg_len + hctx_wr->processed) > req->nbytes)
2049                 params.sg_len = req->nbytes - hctx_wr->processed;
2050         if (!hctx_wr->result ||
2051             ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
2052                 if (is_hmac(crypto_ahash_tfm(rtfm))) {
2053                         params.kctx_len /= 2;
2054                         params.opad_needed = 0;
2055                 }
2056                 params.last = 0;
2057                 params.more = 1;
2058                 params.sg_len = rounddown(params.sg_len, bs);
2059                 params.hash_size = params.alg_prm.result_size;
2060                 params.scmd1 = 0;
2061         } else {
2062                 params.last = 1;
2063                 params.more = 0;
2064                 params.hash_size = crypto_ahash_digestsize(rtfm);
2065                 params.scmd1 = reqctx->data_len + params.sg_len;
2066         }
2067         params.bfr_len = 0;
2068         reqctx->data_len += params.sg_len;
2069         skb = create_hash_wr(req, &params);
2070         if (IS_ERR(skb)) {
2071                 error = PTR_ERR(skb);
2072                 goto err;
2073         }
2074         hctx_wr->processed += params.sg_len;
2075         skb->dev = u_ctx->lldi.ports[0];
2076         set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
2077         chcr_send_wr(skb);
2078         return 0;
2079 err:
2080         return error;
2081 }
2082
2083 static inline void chcr_handle_ahash_resp(struct ahash_request *req,
2084                                           unsigned char *input,
2085                                           int err)
2086 {
2087         struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2088         struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2089         int digestsize, updated_digestsize;
2090         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2091         struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
2092         struct chcr_dev *dev = h_ctx(tfm)->dev;
2093
2094         if (input == NULL)
2095                 goto out;
2096         digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
2097         updated_digestsize = digestsize;
2098         if (digestsize == SHA224_DIGEST_SIZE)
2099                 updated_digestsize = SHA256_DIGEST_SIZE;
2100         else if (digestsize == SHA384_DIGEST_SIZE)
2101                 updated_digestsize = SHA512_DIGEST_SIZE;
2102
2103         if (hctx_wr->dma_addr) {
2104                 dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
2105                                  hctx_wr->dma_len, DMA_TO_DEVICE);
2106                 hctx_wr->dma_addr = 0;
2107         }
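        /* Last WR of the request: copy the final digest into req->result.
         * Otherwise stash the (untruncated) partial hash and issue the next
         * work request via chcr_ahash_continue().
         */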
2108         if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
2109                                  req->nbytes)) {
2110                 if (hctx_wr->result == 1) {
2111                         hctx_wr->result = 0;
2112                         memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
2113                                digestsize);
2114                 } else {
2115                         memcpy(reqctx->partial_hash,
2116                                input + sizeof(struct cpl_fw6_pld),
2117                                updated_digestsize);
2119                 }
2120                 goto unmap;
2121         }
2122         memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
2123                updated_digestsize);
2124
2125         err = chcr_ahash_continue(req);
2126         if (err)
2127                 goto unmap;
2128         return;
2129 unmap:
2130         if (hctx_wr->is_sg_map)
2131                 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2132
2134 out:
2135         chcr_dec_wrcount(dev);
2136         req->base.complete(&req->base, err);
2137 }
2138
2139 /*
2140  *      chcr_handle_resp - Unmap the DMA buffers associated with the request
2141  *      @req: crypto request
2142  */
2143 int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
2144                          int err)
2145 {
2146         struct crypto_tfm *tfm = req->tfm;
2147         struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2148         struct adapter *adap = padap(ctx->dev);
2149
2150         switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
2151         case CRYPTO_ALG_TYPE_AEAD:
2152                 err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
2153                 break;
2154
2155         case CRYPTO_ALG_TYPE_SKCIPHER:
2156                 chcr_handle_cipher_resp(skcipher_request_cast(req),
2157                                         input, err);
2158                 break;
2159         case CRYPTO_ALG_TYPE_AHASH:
2160                 chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
2161         }
2162         atomic_inc(&adap->chcr_stats.complete);
2163         return err;
2164 }

2165 static int chcr_ahash_export(struct ahash_request *areq, void *out)
2166 {
2167         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2168         struct chcr_ahash_req_ctx *state = out;
2169
2170         state->reqlen = req_ctx->reqlen;
2171         state->data_len = req_ctx->data_len;
2172         memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
2173         memcpy(state->partial_hash, req_ctx->partial_hash,
2174                CHCR_HASH_MAX_DIGEST_SIZE);
2175         chcr_init_hctx_per_wr(state);
2176         return 0;
2177 }
2178
2179 static int chcr_ahash_import(struct ahash_request *areq, const void *in)
2180 {
2181         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2182         struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
2183
2184         req_ctx->reqlen = state->reqlen;
2185         req_ctx->data_len = state->data_len;
2186         req_ctx->reqbfr = req_ctx->bfr1;
2187         req_ctx->skbfr = req_ctx->bfr2;
2188         memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
2189         memcpy(req_ctx->partial_hash, state->partial_hash,
2190                CHCR_HASH_MAX_DIGEST_SIZE);
2191         chcr_init_hctx_per_wr(req_ctx);
2192         return 0;
2193 }
2194
2195 static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2196                              unsigned int keylen)
2197 {
2198         struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
2199         unsigned int digestsize = crypto_ahash_digestsize(tfm);
2200         unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2201         unsigned int i, err = 0, updated_digestsize;
2202
2203         SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
2204
2205         /* Use the key to calculate the ipad and opad. The ipad will be sent
2206          * with the first request's data and the opad with the final hash
2207          * result; they are kept in hmacctx->ipad and hmacctx->opad.
2208          */
2209         shash->tfm = hmacctx->base_hash;
2210         if (keylen > bs) {
2211                 err = crypto_shash_digest(shash, key, keylen,
2212                                           hmacctx->ipad);
2213                 if (err)
2214                         goto out;
2215                 keylen = digestsize;
2216         } else {
2217                 memcpy(hmacctx->ipad, key, keylen);
2218         }
2219         memset(hmacctx->ipad + keylen, 0, bs - keylen);
2220         memcpy(hmacctx->opad, hmacctx->ipad, bs);
2221
2222         for (i = 0; i < bs / sizeof(int); i++) {
2223                 *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
2224                 *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
2225         }
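        /* This is the standard HMAC key schedule (RFC 2104): assuming
         * IPAD_DATA/OPAD_DATA are the usual repeated 0x36/0x5c words, each
         * pad byte ends up as key[i] ^ 0x36 and key[i] ^ 0x5c respectively.
         */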
2226
2227         updated_digestsize = digestsize;
2228         if (digestsize == SHA224_DIGEST_SIZE)
2229                 updated_digestsize = SHA256_DIGEST_SIZE;
2230         else if (digestsize == SHA384_DIGEST_SIZE)
2231                 updated_digestsize = SHA512_DIGEST_SIZE;
2232         err = chcr_compute_partial_hash(shash, hmacctx->ipad,
2233                                         hmacctx->ipad, digestsize);
2234         if (err)
2235                 goto out;
2236         chcr_change_order(hmacctx->ipad, updated_digestsize);
2237
2238         err = chcr_compute_partial_hash(shash, hmacctx->opad,
2239                                         hmacctx->opad, digestsize);
2240         if (err)
2241                 goto out;
2242         chcr_change_order(hmacctx->opad, updated_digestsize);
2243 out:
2244         return err;
2245 }
2246
2247 static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
2248                                unsigned int key_len)
2249 {
2250         struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
2251         unsigned short context_size = 0;
2252         int err;
2253
2254         err = chcr_cipher_fallback_setkey(cipher, key, key_len);
2255         if (err)
2256                 goto badkey_err;
2257
2258         memcpy(ablkctx->key, key, key_len);
2259         ablkctx->enckey_len = key_len;
2260         get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
2261         context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
2262         /* Both XTS keys must be padded with zeros to a 16-byte boundary,
2263          * so each 24-byte key gets 8 bytes of zero padding.
2264          */
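        /* Resulting key layout for a 48-byte (2 x 24-byte) input key:
         *
         *   | key1 (24B) | zeros (8B) | key2 (24B) | zeros (8B) |
         *
         * which is what the memmove()/memset() sequence below constructs.
         */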
2265         if (key_len == 48) {
2266                 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len
2267                                 + 16) >> 4;
2268                 memmove(ablkctx->key + 32, ablkctx->key + 24, 24);
2269                 memset(ablkctx->key + 24, 0, 8);
2270                 memset(ablkctx->key + 56, 0, 8);
2271                 ablkctx->enckey_len = 64;
2272                 ablkctx->key_ctx_hdr =
2273                         FILL_KEY_CTX_HDR(CHCR_KEYCTX_CIPHER_KEY_SIZE_192,
2274                                          CHCR_KEYCTX_NO_KEY, 1,
2275                                          0, context_size);
2276         } else {
2277                 ablkctx->key_ctx_hdr =
2278                 FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
2279                                  CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
2280                                  CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
2281                                  CHCR_KEYCTX_NO_KEY, 1,
2282                                  0, context_size);
2283         }
2284         ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
2285         return 0;
2286 badkey_err:
2287         ablkctx->enckey_len = 0;
2288
2289         return err;
2290 }
2291
2292 static int chcr_sha_init(struct ahash_request *areq)
2293 {
2294         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2295         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2296         int digestsize =  crypto_ahash_digestsize(tfm);
2297
2298         req_ctx->data_len = 0;
2299         req_ctx->reqlen = 0;
2300         req_ctx->reqbfr = req_ctx->bfr1;
2301         req_ctx->skbfr = req_ctx->bfr2;
2302         copy_hash_init_values(req_ctx->partial_hash, digestsize);
2303
2304         return 0;
2305 }
2306
2307 static int chcr_sha_cra_init(struct crypto_tfm *tfm)
2308 {
2309         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2310                                  sizeof(struct chcr_ahash_req_ctx));
2311         return chcr_device_init(crypto_tfm_ctx(tfm));
2312 }
2313
2314 static int chcr_hmac_init(struct ahash_request *areq)
2315 {
2316         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2317         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
2318         struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
2319         unsigned int digestsize = crypto_ahash_digestsize(rtfm);
2320         unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2321
2322         chcr_sha_init(areq);
2323         req_ctx->data_len = bs;
2324         if (is_hmac(crypto_ahash_tfm(rtfm))) {
2325                 if (digestsize == SHA224_DIGEST_SIZE)
2326                         memcpy(req_ctx->partial_hash, hmacctx->ipad,
2327                                SHA256_DIGEST_SIZE);
2328                 else if (digestsize == SHA384_DIGEST_SIZE)
2329                         memcpy(req_ctx->partial_hash, hmacctx->ipad,
2330                                SHA512_DIGEST_SIZE);
2331                 else
2332                         memcpy(req_ctx->partial_hash, hmacctx->ipad,
2333                                digestsize);
2334         }
2335         return 0;
2336 }
2337
2338 static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
2339 {
2340         struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2341         struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2342         unsigned int digestsize =
2343                 crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
2344
2345         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2346                                  sizeof(struct chcr_ahash_req_ctx));
2347         hmacctx->base_hash = chcr_alloc_shash(digestsize);
2348         if (IS_ERR(hmacctx->base_hash))
2349                 return PTR_ERR(hmacctx->base_hash);
2350         return chcr_device_init(crypto_tfm_ctx(tfm));
2351 }
2352
2353 static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
2354 {
2355         struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2356         struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2357
2358         if (hmacctx->base_hash) {
2359                 chcr_free_shash(hmacctx->base_hash);
2360                 hmacctx->base_hash = NULL;
2361         }
2362 }
2363
2364 inline void chcr_aead_common_exit(struct aead_request *req)
2365 {
2366         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2367         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2368         struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
2369
2370         chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
2371 }
2372
2373 static int chcr_aead_common_init(struct aead_request *req)
2374 {
2375         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2376         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2377         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2378         unsigned int authsize = crypto_aead_authsize(tfm);
2379         int error = -EINVAL;
2380
2381         /* validate key size */
2382         if (aeadctx->enckey_len == 0)
2383                 goto err;
2384         if (reqctx->op && req->cryptlen < authsize)
2385                 goto err;
2386         if (reqctx->b0_len)
2387                 reqctx->scratch_pad = reqctx->iv + IV;
2388         else
2389                 reqctx->scratch_pad = NULL;
2390
2391         error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2392                                   reqctx->op);
2393         if (error) {
2394                 error = -ENOMEM;
2395                 goto err;
2396         }
2397
2398         return 0;
2399 err:
2400         return error;
2401 }
2402
2403 static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
2404                                    int aadmax, int wrlen,
2405                                    unsigned short op_type)
2406 {
2407         unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2408
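        /* Fall back to software for the cases the hardware path cannot
         * handle: a zero-length payload, too many destination SG entries,
         * AAD beyond the hardware limit, or a WR exceeding SGE_MAX_WR_LEN.
         */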
2409         if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2410             dst_nents > MAX_DSGL_ENT ||
2411             (req->assoclen > aadmax) ||
2412             (wrlen > SGE_MAX_WR_LEN))
2413                 return 1;
2414         return 0;
2415 }
2416
2417 static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2418 {
2419         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2420         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2421         struct aead_request *subreq = aead_request_ctx(req);
2422
2423         aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2424         aead_request_set_callback(subreq, req->base.flags,
2425                                   req->base.complete, req->base.data);
2426         aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2427                                  req->iv);
2428         aead_request_set_ad(subreq, req->assoclen);
2429         return op_type ? crypto_aead_decrypt(subreq) :
2430                 crypto_aead_encrypt(subreq);
2431 }
2432
2433 static struct sk_buff *create_authenc_wr(struct aead_request *req,
2434                                          unsigned short qid,
2435                                          int size)
2436 {
2437         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2438         struct chcr_context *ctx = a_ctx(tfm);
2439         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2440         struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2441         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2442         struct sk_buff *skb = NULL;
2443         struct chcr_wr *chcr_req;
2444         struct cpl_rx_phys_dsgl *phys_cpl;
2445         struct ulptx_sgl *ulptx;
2446         unsigned int transhdr_len;
2447         unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
2448         unsigned int   kctx_len = 0, dnents, snents;
2449         unsigned int  authsize = crypto_aead_authsize(tfm);
2450         int error = -EINVAL;
2451         u8 *ivptr;
2452         int null = 0;
2453         gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2454                 GFP_ATOMIC;
2455         struct adapter *adap = padap(ctx->dev);
2456         unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2457
2458         if (req->cryptlen == 0)
2459                 return NULL;
2460
2461         reqctx->b0_len = 0;
2462         error = chcr_aead_common_init(req);
2463         if (error)
2464                 return ERR_PTR(error);
2465
2466         if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
2467                 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2468                 null = 1;
2469         }
2470         dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
2471                 (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
2472         dnents += MIN_AUTH_SG; // For IV
2473         snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2474                                CHCR_SRC_SG_SIZE, 0);
2475         dst_size = get_space_for_phys_dsgl(dnents);
2476         kctx_len = (KEY_CONTEXT_CTX_LEN_G(ntohl(aeadctx->key_ctx_hdr)) << 4)
2477                 - sizeof(chcr_req->key_ctx);
2478         transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2479         reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
2480                         SGE_MAX_WR_LEN;
2481         temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
2482                         : (sgl_len(snents) * 8);
2483         transhdr_len += temp;
2484         transhdr_len = roundup(transhdr_len, 16);
2485
2486         if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2487                                     transhdr_len, reqctx->op)) {
2488                 atomic_inc(&adap->chcr_stats.fallback);
2489                 chcr_aead_common_exit(req);
2490                 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2491         }
2492         skb = alloc_skb(transhdr_len, flags);
2493         if (!skb) {
2494                 error = -ENOMEM;
2495                 goto err;
2496         }
2497
2498         chcr_req = __skb_put_zero(skb, transhdr_len);
2499
2500         temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
2501
2502         /*
2503          * Input order is AAD, IV and payload, where the IV should be
2504          * included as part of the authdata. All other fields should be
2505          * filled according to the hardware spec.
2506          */
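        /* The work request assembled below is laid out as:
         *
         *   | chcr_wr (incl. key context) | cpl_rx_phys_dsgl (dst) | IV |
         *   | ulptx SGL or immediate payload (src) |
         *
         * matching the phys_cpl/ivptr/ulptx pointer arithmetic further down.
         */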
2507         chcr_req->sec_cpl.op_ivinsrtofst =
2508                                 FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
2509         chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
2510         chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2511                                         null ? 0 : 1 + IV,
2512                                         null ? 0 : IV + req->assoclen,
2513                                         req->assoclen + IV + 1,
2514                                         (temp & 0x1F0) >> 4);
2515         chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2516                                         temp & 0xF,
2517                                         null ? 0 : req->assoclen + IV + 1,
2518                                         temp, temp);
2519         if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
2520             subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
2521                 temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
2522         else
2523                 temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
2524         chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
2525                                         (reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
2526                                         temp,
2527                                         actx->auth_mode, aeadctx->hmac_ctrl,
2528                                         IV >> 1);
2529         chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2530                                          0, 0, dst_size);
2531
2532         chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2533         if (reqctx->op == CHCR_ENCRYPT_OP ||
2534                 subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2535                 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
2536                 memcpy(chcr_req->key_ctx.key, aeadctx->key,
2537                        aeadctx->enckey_len);
2538         else
2539                 memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2540                        aeadctx->enckey_len);
2541
2542         memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2543                actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
2544         phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2545         ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2546         ulptx = (struct ulptx_sgl *)(ivptr + IV);
2547         if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2548             subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2549                 memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2550                 memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
2551                                 CTR_RFC3686_IV_SIZE);
2552                 *(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
2553                         CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2554         } else {
2555                 memcpy(ivptr, req->iv, IV);
2556         }
2557         chcr_add_aead_dst_ent(req, phys_cpl, qid);
2558         chcr_add_aead_src_ent(req, ulptx);
2559         atomic_inc(&adap->chcr_stats.cipher_rqst);
2560         temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2561                 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
2562         create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2563                    transhdr_len, temp, 0);
2564         reqctx->skb = skb;
2565
2566         return skb;
2567 err:
2568         chcr_aead_common_exit(req);
2569
2570         return ERR_PTR(error);
2571 }
2572
2573 int chcr_aead_dma_map(struct device *dev,
2574                       struct aead_request *req,
2575                       unsigned short op_type)
2576 {
2577         int error;
2578         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2579         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2580         unsigned int authsize = crypto_aead_authsize(tfm);
2581         int src_len, dst_len;
2582
2583         /* Calculate and handle the src and dst sg lengths separately
2584          * for in-place and out-of-place operations.
2585          */
2586         if (req->src == req->dst) {
2587                 src_len = req->assoclen + req->cryptlen + (op_type ?
2588                                                         0 : authsize);
2589                 dst_len = src_len;
2590         } else {
2591                 src_len = req->assoclen + req->cryptlen;
2592                 dst_len = req->assoclen + req->cryptlen + (op_type ?
2593                                                         -authsize : authsize);
2594         }
2595
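        /* For example, an out-of-place decrypt (op_type != 0) maps src for
         * assoclen + cryptlen bytes but dst for authsize bytes fewer, since
         * the tag is consumed during verification rather than written back.
         */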
2596         if (!req->cryptlen || !src_len || !dst_len)
2597                 return 0;
2598         reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
2599                                         DMA_BIDIRECTIONAL);
2600         if (dma_mapping_error(dev, reqctx->iv_dma))
2601                 return -ENOMEM;
2602         if (reqctx->b0_len)
2603                 reqctx->b0_dma = reqctx->iv_dma + IV;
2604         else
2605                 reqctx->b0_dma = 0;
2606         if (req->src == req->dst) {
2607                 error = dma_map_sg(dev, req->src,
2608                                 sg_nents_for_len(req->src, src_len),
2609                                         DMA_BIDIRECTIONAL);
2610                 if (!error)
2611                         goto err;
2612         } else {
2613                 error = dma_map_sg(dev, req->src,
2614                                    sg_nents_for_len(req->src, src_len),
2615                                    DMA_TO_DEVICE);
2616                 if (!error)
2617                         goto err;
2618                 error = dma_map_sg(dev, req->dst,
2619                                    sg_nents_for_len(req->dst, dst_len),
2620                                    DMA_FROM_DEVICE);
2621                 if (!error) {
2622                         dma_unmap_sg(dev, req->src,
2623                                      sg_nents_for_len(req->src, src_len),
2624                                      DMA_TO_DEVICE);
2625                         goto err;
2626                 }
2627         }
2628
2629         return 0;
2630 err:
2631         dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
2632         return -ENOMEM;
2633 }
2634
2635 void chcr_aead_dma_unmap(struct device *dev,
2636                          struct aead_request *req,
2637                          unsigned short op_type)
2638 {
2639         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2640         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2641         unsigned int authsize = crypto_aead_authsize(tfm);
2642         int src_len, dst_len;
2643
2644         /* Calculate and handle the src and dst sg lengths separately
2645          * for in-place and out-of-place operations.
2646          */
2647         if (req->src == req->dst) {
2648                 src_len = req->assoclen + req->cryptlen + (op_type ?
2649                                                         0 : authsize);
2650                 dst_len = src_len;
2651         } else {
2652                 src_len = req->assoclen + req->cryptlen;
2653                 dst_len = req->assoclen + req->cryptlen + (op_type ?
2654                                                 -authsize : authsize);
2655         }
2656
2657         if (!req->cryptlen || !src_len || !dst_len)
2658                 return;
2659
2660         dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
2661                                         DMA_BIDIRECTIONAL);
2662         if (req->src == req->dst) {
2663                 dma_unmap_sg(dev, req->src,
2664                              sg_nents_for_len(req->src, src_len),
2665                              DMA_BIDIRECTIONAL);
2666         } else {
2667                 dma_unmap_sg(dev, req->src,
2668                              sg_nents_for_len(req->src, src_len),
2669                              DMA_TO_DEVICE);
2670                 dma_unmap_sg(dev, req->dst,
2671                              sg_nents_for_len(req->dst, dst_len),
2672                              DMA_FROM_DEVICE);
2673         }
2674 }
2675
2676 void chcr_add_aead_src_ent(struct aead_request *req,
2677                            struct ulptx_sgl *ulptx)
2678 {
2679         struct ulptx_walk ulp_walk;
2680         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2681
2682         if (reqctx->imm) {
2683                 u8 *buf = (u8 *)ulptx;
2684
2685                 if (reqctx->b0_len) {
2686                         memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2687                         buf += reqctx->b0_len;
2688                 }
2689                 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2690                                    buf, req->cryptlen + req->assoclen, 0);
2691         } else {
2692                 ulptx_walk_init(&ulp_walk, ulptx);
2693                 if (reqctx->b0_len)
2694                         ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2695                                             reqctx->b0_dma);
2696                 ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
2697                                   req->assoclen,  0);
2698                 ulptx_walk_end(&ulp_walk);
2699         }
2700 }
2701
2702 void chcr_add_aead_dst_ent(struct aead_request *req,
2703                            struct cpl_rx_phys_dsgl *phys_cpl,
2704                            unsigned short qid)
2705 {
2706         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2707         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2708         struct dsgl_walk dsgl_walk;
2709         unsigned int authsize = crypto_aead_authsize(tfm);
2710         struct chcr_context *ctx = a_ctx(tfm);
2711         u32 temp;
2712         unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2713
2714         dsgl_walk_init(&dsgl_walk, phys_cpl);
2715         dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
2716         temp = req->assoclen + req->cryptlen +
2717                 (reqctx->op ? -authsize : authsize);
2718         dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
2719         dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2720 }
2721
2722 void chcr_add_cipher_src_ent(struct skcipher_request *req,
2723                              void *ulptx,
2724                              struct  cipher_wr_param *wrparam)
2725 {
2726         struct ulptx_walk ulp_walk;
2727         struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2728         u8 *buf = ulptx;
2729
2730         memcpy(buf, reqctx->iv, IV);
2731         buf += IV;
2732         if (reqctx->imm) {
2733                 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2734                                    buf, wrparam->bytes, reqctx->processed);
2735         } else {
2736                 ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
2737                 ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2738                                   reqctx->src_ofst);
2739                 reqctx->srcsg = ulp_walk.last_sg;
2740                 reqctx->src_ofst = ulp_walk.last_sg_len;
2741                 ulptx_walk_end(&ulp_walk);
2742         }
2743 }
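/*
 * reqctx->srcsg and reqctx->src_ofst record where the ULPTX walk stopped,
 * so a follow-up work request for the same skcipher request resumes from
 * that scatterlist entry and offset.
 */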
2744
2745 void chcr_add_cipher_dst_ent(struct skcipher_request *req,
2746                              struct cpl_rx_phys_dsgl *phys_cpl,
2747                              struct  cipher_wr_param *wrparam,
2748                              unsigned short qid)
2749 {
2750         struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2751         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
2752         struct chcr_context *ctx = c_ctx(tfm);
2753         struct dsgl_walk dsgl_walk;
2754         unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2755
2756         dsgl_walk_init(&dsgl_walk, phys_cpl);
2757         dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2758                          reqctx->dst_ofst);
2759         reqctx->dstsg = dsgl_walk.last_sg;
2760         reqctx->dst_ofst = dsgl_walk.last_sg_len;
2761         dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2762 }
2763
2764 void chcr_add_hash_src_ent(struct ahash_request *req,
2765                            struct ulptx_sgl *ulptx,
2766                            struct hash_wr_param *param)
2767 {
2768         struct ulptx_walk ulp_walk;
2769         struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2770
2771         if (reqctx->hctx_wr.imm) {
2772                 u8 *buf = (u8 *)ulptx;
2773
2774                 if (param->bfr_len) {
2775                         memcpy(buf, reqctx->reqbfr, param->bfr_len);
2776                         buf += param->bfr_len;
2777                 }
2778
2779                 sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
2780                                    sg_nents(reqctx->hctx_wr.srcsg), buf,
2781                                    param->sg_len, 0);
2782         } else {
2783                 ulptx_walk_init(&ulp_walk, ulptx);
2784                 if (param->bfr_len)
2785                         ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2786                                             reqctx->hctx_wr.dma_addr);
2787                 ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
2788                                   param->sg_len, reqctx->hctx_wr.src_ofst);
2789                 reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
2790                 reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
2791                 ulptx_walk_end(&ulp_walk);
2792         }
2793 }
2794
2795 int chcr_hash_dma_map(struct device *dev,
2796                       struct ahash_request *req)
2797 {
2798         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2799         int error = 0;
2800
2801         if (!req->nbytes)
2802                 return 0;
2803         error = dma_map_sg(dev, req->src, sg_nents(req->src),
2804                            DMA_TO_DEVICE);
2805         if (!error)
2806                 return -ENOMEM;
2807         req_ctx->hctx_wr.is_sg_map = 1;
2808         return 0;
2809 }
2810
2811 void chcr_hash_dma_unmap(struct device *dev,
2812                          struct ahash_request *req)
2813 {
2814         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2815
2816         if (!req->nbytes)
2817                 return;
2818
2819         dma_unmap_sg(dev, req->src, sg_nents(req->src),
2820                            DMA_TO_DEVICE);
2821         req_ctx->hctx_wr.is_sg_map = 0;
2822
2823 }
2824
2825 int chcr_cipher_dma_map(struct device *dev,
2826                         struct skcipher_request *req)
2827 {
2828         int error;
2829
2830         if (req->src == req->dst) {
2831                 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2832                                    DMA_BIDIRECTIONAL);
2833                 if (!error)
2834                         goto err;
2835         } else {
2836                 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2837                                    DMA_TO_DEVICE);
2838                 if (!error)
2839                         goto err;
2840                 error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2841                                    DMA_FROM_DEVICE);
2842                 if (!error) {
2843                         dma_unmap_sg(dev, req->src, sg_nents(req->src),
2844                                    DMA_TO_DEVICE);
2845                         goto err;
2846                 }
2847         }
2848
2849         return 0;
2850 err:
2851         return -ENOMEM;
2852 }
2853
2854 void chcr_cipher_dma_unmap(struct device *dev,
2855                            struct skcipher_request *req)
2856 {
2857         if (req->src == req->dst) {
2858                 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2859                                    DMA_BIDIRECTIONAL);
2860         } else {
2861                 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2862                                    DMA_TO_DEVICE);
2863                 dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2864                                    DMA_FROM_DEVICE);
2865         }
2866 }
2867
2868 static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2869 {
2870         __be32 data;
2871
2872         memset(block, 0, csize);
2873         block += csize;
2874
2875         if (csize >= 4)
2876                 csize = 4;
2877         else if (msglen > (unsigned int)(1 << (8 * csize)))
2878                 return -EOVERFLOW;
2879
2880         data = cpu_to_be32(msglen);
2881         memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2882
2883         return 0;
2884 }
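/*
 * Worked example with illustrative values: for msglen = 0x012345 and
 * csize = 3, cpu_to_be32(msglen) is the byte sequence 00 01 23 45 and the
 * memcpy() stores its low three bytes, 01 23 45, as the big-endian length
 * field.  With csize >= 4 the full 32-bit big-endian length is stored in
 * the last four bytes and no overflow check is needed.
 */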
2885
2886 static int generate_b0(struct aead_request *req, u8 *ivptr,
2887                         unsigned short op_type)
2888 {
2889         unsigned int l, lp, m;
2890         int rc;
2891         struct crypto_aead *aead = crypto_aead_reqtfm(req);
2892         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2893         u8 *b0 = reqctx->scratch_pad;
2894
2895         m = crypto_aead_authsize(aead);
2896
2897         memcpy(b0, ivptr, 16);
2898
2899         lp = b0[0];
2900         l = lp + 1;
2901
2902         /* set m, bits 3-5 */
2903         *b0 |= (8 * ((m - 2) / 2));
2904
2905         /* set adata, bit 6, if associated data is used */
2906         if (req->assoclen)
2907                 *b0 |= 64;
2908         rc = set_msg_len(b0 + 16 - l,
2909                          (op_type == CHCR_DECRYPT_OP) ?
2910                          req->cryptlen - m : req->cryptlen, l);
2911
2912         return rc;
2913 }
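/*
 * The B0 flags byte assembled above follows RFC 3610: bits 0-2 hold
 * L' = L - 1 (already present in iv[0] and copied into b0), bits 3-5 hold
 * (M - 2) / 2 and bit 6 is the Adata flag.  As an illustrative example,
 * authsize m = 8 with non-empty AAD and iv[0] = 3 gives
 * 0x40 | (8 * ((8 - 2) / 2)) | 0x03 = 0x5b.
 */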
2914
2915 static inline int crypto_ccm_check_iv(const u8 *iv)
2916 {
2917         /* 2 <= L <= 8, so 1 <= L' <= 7. */
2918         if (iv[0] < 1 || iv[0] > 7)
2919                 return -EINVAL;
2920
2921         return 0;
2922 }
2923
2924 static int ccm_format_packet(struct aead_request *req,
2925                              u8 *ivptr,
2926                              unsigned int sub_type,
2927                              unsigned short op_type,
2928                              unsigned int assoclen)
2929 {
2930         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2931         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2932         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2933         int rc = 0;
2934
2935         if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2936                 ivptr[0] = 3;
2937                 memcpy(ivptr + 1, &aeadctx->salt[0], 3);
2938                 memcpy(ivptr + 4, req->iv, 8);
2939                 memset(ivptr + 12, 0, 4);
2940         } else {
2941                 memcpy(ivptr, req->iv, 16);
2942         }
2943         if (assoclen)
2944                 put_unaligned_be16(assoclen, &reqctx->scratch_pad[16]);
2945
2946         rc = generate_b0(req, ivptr, op_type);
2947         /* zero the ctr value */
2948         memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
2949         return rc;
2950 }
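/*
 * Sketch of the RFC 4309 block built above:
 *   ivptr[0]      = 0x03 flags (L' = 3, i.e. a 4-byte counter)
 *   ivptr[1..3]   = 3-byte salt taken from the key
 *   ivptr[4..11]  = 8-byte per-request IV
 *   ivptr[12..15] = zeroed counter
 * When AAD is present its length is stored big-endian in
 * scratch_pad[16..17], directly after the 16-byte B0 block.
 */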
2951
2952 static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2953                                   unsigned int dst_size,
2954                                   struct aead_request *req,
2955                                   unsigned short op_type)
2956 {
2957         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2958         struct chcr_context *ctx = a_ctx(tfm);
2959         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2960         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2961         unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2962         unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2963         unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2964         unsigned int ccm_xtra;
2965         unsigned int tag_offset = 0, auth_offset = 0;
2966         unsigned int assoclen;
2967
2968         if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2969                 assoclen = req->assoclen - 8;
2970         else
2971                 assoclen = req->assoclen;
2972         ccm_xtra = CCM_B0_SIZE +
2973                 ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2974
2975         auth_offset = req->cryptlen ?
2976                 (req->assoclen + IV + 1 + ccm_xtra) : 0;
2977         if (op_type == CHCR_DECRYPT_OP) {
2978                 if (crypto_aead_authsize(tfm) != req->cryptlen)
2979                         tag_offset = crypto_aead_authsize(tfm);
2980                 else
2981                         auth_offset = 0;
2982         }
2983
2984         sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
2985         sec_cpl->pldlen =
2986                 htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
2987         /* For CCM there will always be a B0 block, so AAD start is always 1 */
2988         sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2989                                 1 + IV, IV + assoclen + ccm_xtra,
2990                                 req->assoclen + IV + 1 + ccm_xtra, 0);
2991
2992         sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
2993                                         auth_offset, tag_offset,
2994                                         (op_type == CHCR_ENCRYPT_OP) ? 0 :
2995                                         crypto_aead_authsize(tfm));
2996         sec_cpl->seqno_numivs =  FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2997                                         (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
2998                                         cipher_mode, mac_mode,
2999                                         aeadctx->hmac_ctrl, IV >> 1);
3000
3001         sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
3002                                         0, dst_size);
3003 }
3004
3005 static int aead_ccm_validate_input(unsigned short op_type,
3006                                    struct aead_request *req,
3007                                    struct chcr_aead_ctx *aeadctx,
3008                                    unsigned int sub_type)
3009 {
3010         if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
3011                 if (crypto_ccm_check_iv(req->iv)) {
3012                         pr_err("CCM: IV check failed\n");
3013                         return -EINVAL;
3014                 }
3015         } else {
3016                 if (req->assoclen != 16 && req->assoclen != 20) {
3017                         pr_err("RFC4309: Invalid AAD length %u\n",
3018                                req->assoclen);
3019                         return -EINVAL;
3020                 }
3021         }
3022         return 0;
3023 }
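/*
 * The two accepted RFC 4309 AAD lengths appear to correspond to ESP:
 * SPI (4) + 32-bit sequence number (4) + the 8-byte IV carried in the
 * AAD = 16 bytes, or 20 bytes with 64-bit extended sequence numbers.
 */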
3024
3025 static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
3026                                           unsigned short qid,
3027                                           int size)
3028 {
3029         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3030         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3031         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3032         struct sk_buff *skb = NULL;
3033         struct chcr_wr *chcr_req;
3034         struct cpl_rx_phys_dsgl *phys_cpl;
3035         struct ulptx_sgl *ulptx;
3036         unsigned int transhdr_len;
3037         unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
3038         unsigned int sub_type, assoclen = req->assoclen;
3039         unsigned int authsize = crypto_aead_authsize(tfm);
3040         int error = -EINVAL;
3041         u8 *ivptr;
3042         gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3043                 GFP_ATOMIC;
3044         struct adapter *adap = padap(a_ctx(tfm)->dev);
3045
3046         sub_type = get_aead_subtype(tfm);
3047         if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
3048                 assoclen -= 8;
3049         reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
3050         error = chcr_aead_common_init(req);
3051         if (error)
3052                 return ERR_PTR(error);
3053
3054         error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
3055         if (error)
3056                 goto err;
3057         dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
3058                         + (reqctx->op ? -authsize : authsize),
3059                         CHCR_DST_SG_SIZE, 0);
3060         dnents += MIN_CCM_SG; // For IV and B0
3061         dst_size = get_space_for_phys_dsgl(dnents);
3062         snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3063                                CHCR_SRC_SG_SIZE, 0);
3064         snents += MIN_CCM_SG; // For B0
3065         kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
3066         transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
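	/* Send the payload as immediate data when the whole work request
	 * fits in SGE_MAX_WR_LEN; otherwise describe it with a ULPTX SGL.
	 */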
3067         reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
3068                        reqctx->b0_len) <= SGE_MAX_WR_LEN;
3069         temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
3070                                      reqctx->b0_len, 16) :
3071                 (sgl_len(snents) *  8);
3072         transhdr_len += temp;
3073         transhdr_len = roundup(transhdr_len, 16);
3074
3075         if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
3076                                 reqctx->b0_len, transhdr_len, reqctx->op)) {
3077                 atomic_inc(&adap->chcr_stats.fallback);
3078                 chcr_aead_common_exit(req);
3079                 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3080         }
3081         skb = alloc_skb(transhdr_len,  flags);
3082
3083         if (!skb) {
3084                 error = -ENOMEM;
3085                 goto err;
3086         }
3087
3088         chcr_req = __skb_put_zero(skb, transhdr_len);
3089
3090         fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
3091
3092         chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3093         memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3094         memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3095                         aeadctx->key, aeadctx->enckey_len);
3096
3097         phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3098         ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3099         ulptx = (struct ulptx_sgl *)(ivptr + IV);
3100         error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
3101         if (error)
3102                 goto dstmap_fail;
3103         chcr_add_aead_dst_ent(req, phys_cpl, qid);
3104         chcr_add_aead_src_ent(req, ulptx);
3105
3106         atomic_inc(&adap->chcr_stats.aead_rqst);
3107         temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3108                 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
3109                 reqctx->b0_len) : 0);
3110         create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
3111                     transhdr_len, temp, 0);
3112         reqctx->skb = skb;
3113
3114         return skb;
3115 dstmap_fail:
3116         kfree_skb(skb);
3117 err:
3118         chcr_aead_common_exit(req);
3119         return ERR_PTR(error);
3120 }
3121
3122 static struct sk_buff *create_gcm_wr(struct aead_request *req,
3123                                      unsigned short qid,
3124                                      int size)
3125 {
3126         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3127         struct chcr_context *ctx = a_ctx(tfm);
3128         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3129         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
3130         struct sk_buff *skb = NULL;
3131         struct chcr_wr *chcr_req;
3132         struct cpl_rx_phys_dsgl *phys_cpl;
3133         struct ulptx_sgl *ulptx;
3134         unsigned int transhdr_len, dnents = 0, snents;
3135         unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
3136         unsigned int authsize = crypto_aead_authsize(tfm);
3137         int error = -EINVAL;
3138         u8 *ivptr;
3139         gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3140                 GFP_ATOMIC;
3141         struct adapter *adap = padap(ctx->dev);
3142         unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
3143
3144         if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
3145                 assoclen = req->assoclen - 8;
3146
3147         reqctx->b0_len = 0;
3148         error = chcr_aead_common_init(req);
3149         if (error)
3150                 return ERR_PTR(error);
3151         dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
3152                                 (reqctx->op ? -authsize : authsize),
3153                                 CHCR_DST_SG_SIZE, 0);
3154         snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3155                                CHCR_SRC_SG_SIZE, 0);
3156         dnents += MIN_GCM_SG; // For IV
3157         dst_size = get_space_for_phys_dsgl(dnents);
3158         kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
3159         transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3160         reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
3161                         SGE_MAX_WR_LEN;
3162         temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
3163                 (sgl_len(snents) * 8);
3164         transhdr_len += temp;
3165         transhdr_len = roundup(transhdr_len, 16);
3166         if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
3167                             transhdr_len, reqctx->op)) {
3168
3169                 atomic_inc(&adap->chcr_stats.fallback);
3170                 chcr_aead_common_exit(req);
3171                 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3172         }
3173         skb = alloc_skb(transhdr_len, flags);
3174         if (!skb) {
3175                 error = -ENOMEM;
3176                 goto err;
3177         }
3178
3179         chcr_req = __skb_put_zero(skb, transhdr_len);
3180
3181         // Offset of the tag from the end
3182         temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
3183         chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
3184                                                 rx_channel_id, 2, 1);
3185         chcr_req->sec_cpl.pldlen =
3186                 htonl(req->assoclen + IV + req->cryptlen);
3187         chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
3188                                         assoclen ? 1 + IV : 0,
3189                                         assoclen ? IV + assoclen : 0,
3190                                         req->assoclen + IV + 1, 0);
3191         chcr_req->sec_cpl.cipherstop_lo_authinsert =
3192                         FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
3193                                                 temp, temp);
3194         chcr_req->sec_cpl.seqno_numivs =
3195                         FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
3196                                         CHCR_ENCRYPT_OP) ? 1 : 0,
3197                                         CHCR_SCMD_CIPHER_MODE_AES_GCM,
3198                                         CHCR_SCMD_AUTH_MODE_GHASH,
3199                                         aeadctx->hmac_ctrl, IV >> 1);
3200         chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
3201                                         0, 0, dst_size);
3202         chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3203         memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3204         memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3205                GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
3206
3207         phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3208         ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3209         /* Prepare a 16-byte IV laid out as: */
3210         /* SALT | IV | 0x00000001 */
3211         if (get_aead_subtype(tfm) ==
3212             CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
3213                 memcpy(ivptr, aeadctx->salt, 4);
3214                 memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
3215         } else {
3216                 memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
3217         }
3218         put_unaligned_be32(0x01, &ivptr[12]);
3219         ulptx = (struct ulptx_sgl *)(ivptr + 16);
3220
3221         chcr_add_aead_dst_ent(req, phys_cpl, qid);
3222         chcr_add_aead_src_ent(req, ulptx);
3223         atomic_inc(&adap->chcr_stats.aead_rqst);
3224         temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3225                 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
3226         create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
3227                     transhdr_len, temp, reqctx->verify);
3228         reqctx->skb = skb;
3229         return skb;
3230
3231 err:
3232         chcr_aead_common_exit(req);
3233         return ERR_PTR(error);
3234 }
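/*
 * The IV assembled above is the GCM pre-counter block J0 for a 96-bit
 * nonce, J0 = IV || 0x00000001 (NIST SP 800-38D); the RFC 4106 variant
 * forms the nonce from the 4-byte salt kept with the key followed by the
 * 8-byte per-request IV.
 */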
3235
3236
3237
3238 static int chcr_aead_cra_init(struct crypto_aead *tfm)
3239 {
3240         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3241         struct aead_alg *alg = crypto_aead_alg(tfm);
3242
3243         aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
3244                                                CRYPTO_ALG_NEED_FALLBACK |
3245                                                CRYPTO_ALG_ASYNC);
3246         if  (IS_ERR(aeadctx->sw_cipher))
3247                 return PTR_ERR(aeadctx->sw_cipher);
3248         crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
3249                                  sizeof(struct aead_request) +
3250                                  crypto_aead_reqsize(aeadctx->sw_cipher)));
3251         return chcr_device_init(a_ctx(tfm));
3252 }
3253
3254 static void chcr_aead_cra_exit(struct crypto_aead *tfm)
3255 {
3256         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3257
3258         crypto_free_aead(aeadctx->sw_cipher);
3259 }
3260
3261 static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
3262                                         unsigned int authsize)
3263 {
3264         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3265
3266         aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
3267         aeadctx->mayverify = VERIFY_HW;
3268         return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3269 }
3270 static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
3271                                     unsigned int authsize)
3272 {
3273         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3274         u32 maxauth = crypto_aead_maxauthsize(tfm);
3275
3276         /* The SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2
3277          * does not hold for SHA1, so the authsize == 12 check must come before
3278          * the authsize == (maxauth >> 1) check.
3279          */
3280         if (authsize == ICV_4) {
3281                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3282                 aeadctx->mayverify = VERIFY_HW;
3283         } else if (authsize == ICV_6) {
3284                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3285                 aeadctx->mayverify = VERIFY_HW;
3286         } else if (authsize == ICV_10) {
3287                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3288                 aeadctx->mayverify = VERIFY_HW;
3289         } else if (authsize == ICV_12) {
3290                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3291                 aeadctx->mayverify = VERIFY_HW;
3292         } else if (authsize == ICV_14) {
3293                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3294                 aeadctx->mayverify = VERIFY_HW;
3295         } else if (authsize == (maxauth >> 1)) {
3296                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3297                 aeadctx->mayverify = VERIFY_HW;
3298         } else if (authsize == maxauth) {
3299                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3300                 aeadctx->mayverify = VERIFY_HW;
3301         } else {
3302                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3303                 aeadctx->mayverify = VERIFY_SW;
3304         }
3305         return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3306 }
3307
3308
3309 static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
3310 {
3311         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3312
3313         switch (authsize) {
3314         case ICV_4:
3315                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3316                 aeadctx->mayverify = VERIFY_HW;
3317                 break;
3318         case ICV_8:
3319                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3320                 aeadctx->mayverify = VERIFY_HW;
3321                 break;
3322         case ICV_12:
3323                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3324                 aeadctx->mayverify = VERIFY_HW;
3325                 break;
3326         case ICV_14:
3327                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3328                 aeadctx->mayverify = VERIFY_HW;
3329                 break;
3330         case ICV_16:
3331                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3332                 aeadctx->mayverify = VERIFY_HW;
3333                 break;
3334         case ICV_13:
3335         case ICV_15:
3336                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3337                 aeadctx->mayverify = VERIFY_SW;
3338                 break;
3339         default:
3340                 return -EINVAL;
3341         }
3342         return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3343 }
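/*
 * Tag lengths of 13 and 15 bytes have no matching hardware truncation
 * mode, so for those sizes the full tag is produced and verification is
 * left to software (VERIFY_SW).
 */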
3344
3345 static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
3346                                           unsigned int authsize)
3347 {
3348         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3349
3350         switch (authsize) {
3351         case ICV_8:
3352                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3353                 aeadctx->mayverify = VERIFY_HW;
3354                 break;
3355         case ICV_12:
3356                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3357                 aeadctx->mayverify = VERIFY_HW;
3358                 break;
3359         case ICV_16:
3360                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3361                 aeadctx->mayverify = VERIFY_HW;
3362                 break;
3363         default:
3364                 return -EINVAL;
3365         }
3366         return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3367 }
3368
3369 static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
3370                                 unsigned int authsize)
3371 {
3372         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3373
3374         switch (authsize) {
3375         case ICV_4:
3376                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3377                 aeadctx->mayverify = VERIFY_HW;
3378                 break;
3379         case ICV_6:
3380                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3381                 aeadctx->mayverify = VERIFY_HW;
3382                 break;
3383         case ICV_8:
3384                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3385                 aeadctx->mayverify = VERIFY_HW;
3386                 break;
3387         case ICV_10:
3388                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3389                 aeadctx->mayverify = VERIFY_HW;
3390                 break;
3391         case ICV_12:
3392                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3393                 aeadctx->mayverify = VERIFY_HW;
3394                 break;
3395         case ICV_14:
3396                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3397                 aeadctx->mayverify = VERIFY_HW;
3398                 break;
3399         case ICV_16:
3400                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3401                 aeadctx->mayverify = VERIFY_HW;
3402                 break;
3403         default:
3404                 return -EINVAL;
3405         }
3406         return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3407 }
3408
3409 static int chcr_ccm_common_setkey(struct crypto_aead *aead,
3410                                 const u8 *key,
3411                                 unsigned int keylen)
3412 {
3413         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3414         unsigned char ck_size, mk_size;
3415         int key_ctx_size = 0;
3416
3417         key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
3418         if (keylen == AES_KEYSIZE_128) {
3419                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3420                 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
3421         } else if (keylen == AES_KEYSIZE_192) {
3422                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3423                 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
3424         } else if (keylen == AES_KEYSIZE_256) {
3425                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3426                 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
3427         } else {
3428                 aeadctx->enckey_len = 0;
3429                 return  -EINVAL;
3430         }
3431         aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
3432                                                 key_ctx_size >> 4);
3433         memcpy(aeadctx->key, key, keylen);
3434         aeadctx->enckey_len = keylen;
3435
3436         return 0;
3437 }
3438
3439 static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
3440                                 const u8 *key,
3441                                 unsigned int keylen)
3442 {
3443         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3444         int error;
3445
3446         crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3447         crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3448                               CRYPTO_TFM_REQ_MASK);
3449         error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3450         if (error)
3451                 return error;
3452         return chcr_ccm_common_setkey(aead, key, keylen);
3453 }
3454
3455 static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
3456                                     unsigned int keylen)
3457 {
3458         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3459         int error;
3460
3461         if (keylen < 3) {
3462                 aeadctx->enckey_len = 0;
3463                 return  -EINVAL;
3464         }
3465         crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3466         crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3467                               CRYPTO_TFM_REQ_MASK);
3468         error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3469         if (error)
3470                 return error;
3471         keylen -= 3;
3472         memcpy(aeadctx->salt, key + keylen, 3);
3473         return chcr_ccm_common_setkey(aead, key, keylen);
3474 }
3475
3476 static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3477                            unsigned int keylen)
3478 {
3479         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3480         struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
3481         unsigned int ck_size;
3482         int ret = 0, key_ctx_size = 0;
3483         struct crypto_aes_ctx aes;
3484
3485         aeadctx->enckey_len = 0;
3486         crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3487         crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
3488                               & CRYPTO_TFM_REQ_MASK);
3489         ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3490         if (ret)
3491                 goto out;
3492
3493         if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3494             keylen > 3) {
3495                 keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
3496                 memcpy(aeadctx->salt, key + keylen, 4);
3497         }
3498         if (keylen == AES_KEYSIZE_128) {
3499                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3500         } else if (keylen == AES_KEYSIZE_192) {
3501                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3502         } else if (keylen == AES_KEYSIZE_256) {
3503                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3504         } else {
3505                 pr_err("GCM: Invalid key length %u\n", keylen);
3506                 ret = -EINVAL;
3507                 goto out;
3508         }
3509
3510         memcpy(aeadctx->key, key, keylen);
3511         aeadctx->enckey_len = keylen;
3512         key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
3513                 AEAD_H_SIZE;
3514         aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
3515                                                 CHCR_KEYCTX_MAC_KEY_SIZE_128,
3516                                                 0, 0,
3517                                                 key_ctx_size >> 4);
3518         /* Calculate H = CIPH(K, 0 repeated 16 times).
3519          * It goes into the key context.
3520          */
3521         ret = aes_expandkey(&aes, key, keylen);
3522         if (ret) {
3523                 aeadctx->enckey_len = 0;
3524                 goto out;
3525         }
3526         memset(gctx->ghash_h, 0, AEAD_H_SIZE);
3527         aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
3528         memzero_explicit(&aes, sizeof(aes));
3529
3530 out:
3531         return ret;
3532 }
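/*
 * The ghash_h value computed above is the GHASH subkey H = E_K(0^128)
 * from NIST SP 800-38D; it is copied into the key context next to the raw
 * AES key when the GCM work request is built.
 */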
3533
3534 static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3535                                    unsigned int keylen)
3536 {
3537         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3538         struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3539         /* the key contains both the auth and cipher keys */
3540         struct crypto_authenc_keys keys;
3541         unsigned int bs, subtype;
3542         unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
3543         int err = 0, i, key_ctx_len = 0;
3544         unsigned char ck_size = 0;
3545         unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
3546         struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
3547         struct algo_param param;
3548         int align;
3549         u8 *o_ptr = NULL;
3550
3551         crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3552         crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3553                               & CRYPTO_TFM_REQ_MASK);
3554         err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3555         if (err)
3556                 goto out;
3557
3558         if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3559                 goto out;
3560
3561         if (get_alg_config(&param, max_authsize)) {
3562                 pr_err("Unsupported digest size\n");
3563                 goto out;
3564         }
3565         subtype = get_aead_subtype(authenc);
3566         if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3567                 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3568                 if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3569                         goto out;
3570                 memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3571                 - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3572                 keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3573         }
3574         if (keys.enckeylen == AES_KEYSIZE_128) {
3575                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3576         } else if (keys.enckeylen == AES_KEYSIZE_192) {
3577                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3578         } else if (keys.enckeylen == AES_KEYSIZE_256) {
3579                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3580         } else {
3581                 pr_err("Unsupported cipher key length\n");
3582                 goto out;
3583         }
3584
3585         /* Copy only the encryption key. The authkey is used to generate
3586          * h(ipad) and h(opad), so it is not needed again; authkeylen is
3587          * the size of the hash digest.
3588          */
3589         memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3590         aeadctx->enckey_len = keys.enckeylen;
3591         if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3592                 subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3593
3594                 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3595                             aeadctx->enckey_len << 3);
3596         }
3597         base_hash  = chcr_alloc_shash(max_authsize);
3598         if (IS_ERR(base_hash)) {
3599                 pr_err("Base driver cannot be loaded\n");
3600                 goto out;
3601         }
3602         {
3603                 SHASH_DESC_ON_STACK(shash, base_hash);
3604
3605                 shash->tfm = base_hash;
3606                 bs = crypto_shash_blocksize(base_hash);
3607                 align = KEYCTX_ALIGN_PAD(max_authsize);
3608                 o_ptr =  actx->h_iopad + param.result_size + align;
3609
3610                 if (keys.authkeylen > bs) {
3611                         err = crypto_shash_digest(shash, keys.authkey,
3612                                                   keys.authkeylen,
3613                                                   o_ptr);
3614                         if (err) {
3615                                 pr_err("Hashing of the auth key failed\n");
3616                                 goto out;
3617                         }
3618                         keys.authkeylen = max_authsize;
3619                 } else
3620                         memcpy(o_ptr, keys.authkey, keys.authkeylen);
3621
3622                 /* Compute the ipad digest */
3623                 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3624                 memcpy(pad, o_ptr, keys.authkeylen);
3625                 for (i = 0; i < bs >> 2; i++)
3626                         *((unsigned int *)pad + i) ^= IPAD_DATA;
3627
3628                 if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
3629                                               max_authsize))
3630                         goto out;
3631                 /* Compute the opad digest */
3632                 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3633                 memcpy(pad, o_ptr, keys.authkeylen);
3634                 for (i = 0; i < bs >> 2; i++)
3635                         *((unsigned int *)pad + i) ^= OPAD_DATA;
3636
3637                 if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3638                         goto out;
3639
3640                 /* convert the ipad and opad digest to network order */
3641                 chcr_change_order(actx->h_iopad, param.result_size);
3642                 chcr_change_order(o_ptr, param.result_size);
3643                 key_ctx_len = sizeof(struct _key_ctx) +
3644                         roundup(keys.enckeylen, 16) +
3645                         (param.result_size + align) * 2;
3646                 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3647                                                 0, 1, key_ctx_len >> 4);
3648                 actx->auth_mode = param.auth_mode;
3649                 chcr_free_shash(base_hash);
3650
3651                 memzero_explicit(&keys, sizeof(keys));
3652                 return 0;
3653         }
3654 out:
3655         aeadctx->enckey_len = 0;
3656         memzero_explicit(&keys, sizeof(keys));
3657         if (!IS_ERR(base_hash))
3658                 chcr_free_shash(base_hash);
3659         return -EINVAL;
3660 }
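/*
 * The block above performs standard HMAC key preprocessing (RFC 2104):
 * h_iopad ends up holding the one-block partial hash (midstate) of
 * K ^ ipad and o_ptr that of K ^ opad, where IPAD_DATA and OPAD_DATA are
 * the repeated 0x36/0x5c pad words, so the hardware can continue hashing
 * from those midstates.
 */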
3661
3662 static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3663                                         const u8 *key, unsigned int keylen)
3664 {
3665         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3666         struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3667         struct crypto_authenc_keys keys;
3668         int err;
3669         /* the key contains both the auth and cipher keys */
3670         unsigned int subtype;
3671         int key_ctx_len = 0;
3672         unsigned char ck_size = 0;
3673
3674         crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3675         crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3676                               & CRYPTO_TFM_REQ_MASK);
3677         err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3678         if (err)
3679                 goto out;
3680
3681         if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3682                 goto out;
3683
3684         subtype = get_aead_subtype(authenc);
3685         if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3686             subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3687                 if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3688                         goto out;
3689                 memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3690                         - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3691                 keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3692         }
3693         if (keys.enckeylen == AES_KEYSIZE_128) {
3694                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3695         } else if (keys.enckeylen == AES_KEYSIZE_192) {
3696                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3697         } else if (keys.enckeylen == AES_KEYSIZE_256) {
3698                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3699         } else {
3700                 pr_err("Unsupported cipher key length %u\n", keys.enckeylen);
3701                 goto out;
3702         }
3703         memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3704         aeadctx->enckey_len = keys.enckeylen;
3705         if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3706             subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3707                 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3708                                 aeadctx->enckey_len << 3);
3709         }
3710         key_ctx_len =  sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
3711
3712         aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3713                                                 0, key_ctx_len >> 4);
3714         actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3715         memzero_explicit(&keys, sizeof(keys));
3716         return 0;
3717 out:
3718         aeadctx->enckey_len = 0;
3719         memzero_explicit(&keys, sizeof(keys));
3720         return -EINVAL;
3721 }
3722
3723 static int chcr_aead_op(struct aead_request *req,
3724                         int size,
3725                         create_wr_t create_wr_fn)
3726 {
3727         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3728         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
3729         struct chcr_context *ctx = a_ctx(tfm);
3730         struct uld_ctx *u_ctx = ULD_CTX(ctx);
3731         struct sk_buff *skb;
3732         struct chcr_dev *cdev;
3733
3734         cdev = a_ctx(tfm)->dev;
3735         if (!cdev) {
3736                 pr_err("%s : No crypto device.\n", __func__);
3737                 return -ENXIO;
3738         }
3739
3740         if (chcr_inc_wrcount(cdev)) {
3741                 /* In the detached state CHCR's lldi or padap has been freed,
3742                  * so we cannot increment the fallback counter here.
3743                  */
3744                 return chcr_aead_fallback(req, reqctx->op);
3745         }
3746
3747         if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
3748                                    reqctx->txqidx) &&
3749             (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) {
3750                 chcr_dec_wrcount(cdev);
3751                 return -ENOSPC;
3752         }
3753
3754         if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3755             crypto_ipsec_check_assoclen(req->assoclen) != 0) {
3756                 pr_err("RFC4106: Invalid value of assoclen %u\n",
3757                        req->assoclen);
3758                 return -EINVAL;
3759         }
3760
3761         /* Form a WR from req */
3762         skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);
3763
3764         if (IS_ERR_OR_NULL(skb)) {
3765                 chcr_dec_wrcount(cdev);
3766                 return PTR_ERR_OR_ZERO(skb);
3767         }
3768
3769         skb->dev = u_ctx->lldi.ports[0];
3770         set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
3771         chcr_send_wr(skb);
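	/* The WR is now owned by the hardware; completion and the final
	 * status are reported asynchronously through the request's
	 * completion callback.
	 */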
3772         return -EINPROGRESS;
3773 }
3774
3775 static int chcr_aead_encrypt(struct aead_request *req)
3776 {
3777         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3778         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3779         struct chcr_context *ctx = a_ctx(tfm);
3780         unsigned int cpu;
3781
3782         cpu = get_cpu();
3783         reqctx->txqidx = cpu % ctx->ntxq;
3784         reqctx->rxqidx = cpu % ctx->nrxq;
3785         put_cpu();
3786
3787         reqctx->verify = VERIFY_HW;
3788         reqctx->op = CHCR_ENCRYPT_OP;
3789
3790         switch (get_aead_subtype(tfm)) {
3791         case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3792         case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3793         case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3794         case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3795                 return chcr_aead_op(req, 0, create_authenc_wr);
3796         case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3797         case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3798                 return chcr_aead_op(req, 0, create_aead_ccm_wr);
3799         default:
3800                 return chcr_aead_op(req, 0, create_gcm_wr);
3801         }
3802 }
3803
3804 static int chcr_aead_decrypt(struct aead_request *req)
3805 {
3806         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3807         struct chcr_context *ctx = a_ctx(tfm);
3808         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3809         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3810         int size;
3811         unsigned int cpu;
3812
3813         cpu = get_cpu();
3814         reqctx->txqidx = cpu % ctx->ntxq;
3815         reqctx->rxqidx = cpu % ctx->nrxq;
3816         put_cpu();
3817
3818         if (aeadctx->mayverify == VERIFY_SW) {
3819                 size = crypto_aead_maxauthsize(tfm);
3820                 reqctx->verify = VERIFY_SW;
3821         } else {
3822                 size = 0;
3823                 reqctx->verify = VERIFY_HW;
3824         }
3825         reqctx->op = CHCR_DECRYPT_OP;
3826         switch (get_aead_subtype(tfm)) {
3827         case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3828         case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3829         case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3830         case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3831                 return chcr_aead_op(req, size, create_authenc_wr);
3832         case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3833         case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3834                 return chcr_aead_op(req, size, create_aead_ccm_wr);
3835         default:
3836                 return chcr_aead_op(req, size, create_gcm_wr);
3837         }
3838 }
3839
3840 static struct chcr_alg_template driver_algs[] = {
3841         /* AES-CBC */
3842         {
3843                 .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
3844                 .is_registered = 0,
3845                 .alg.skcipher = {
3846                         .base.cra_name          = "cbc(aes)",
3847                         .base.cra_driver_name   = "cbc-aes-chcr",
3848                         .base.cra_blocksize     = AES_BLOCK_SIZE,
3849
3850                         .init                   = chcr_init_tfm,
3851                         .exit                   = chcr_exit_tfm,
3852                         .min_keysize            = AES_MIN_KEY_SIZE,
3853                         .max_keysize            = AES_MAX_KEY_SIZE,
3854                         .ivsize                 = AES_BLOCK_SIZE,
3855                         .setkey                 = chcr_aes_cbc_setkey,
3856                         .encrypt                = chcr_aes_encrypt,
3857                         .decrypt                = chcr_aes_decrypt,
3858                         }
3859         },
3860         {
3861                 .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3862                 .is_registered = 0,
3863                 .alg.skcipher = {
3864                         .base.cra_name          = "xts(aes)",
3865                         .base.cra_driver_name   = "xts-aes-chcr",
3866                         .base.cra_blocksize     = AES_BLOCK_SIZE,
3867
3868                         .init                   = chcr_init_tfm,
3869                         .exit                   = chcr_exit_tfm,
3870                         .min_keysize            = 2 * AES_MIN_KEY_SIZE,
3871                         .max_keysize            = 2 * AES_MAX_KEY_SIZE,
3872                         .ivsize                 = AES_BLOCK_SIZE,
3873                         .setkey                 = chcr_aes_xts_setkey,
3874                         .encrypt                = chcr_aes_encrypt,
3875                         .decrypt                = chcr_aes_decrypt,
3876                         }
3877         },
3878         {
                .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
                .is_registered = 0,
                .alg.skcipher = {
                        .base.cra_name          = "ctr(aes)",
                        .base.cra_driver_name   = "ctr-aes-chcr",
                        .base.cra_blocksize     = 1,

                        .init                   = chcr_init_tfm,
                        .exit                   = chcr_exit_tfm,
                        .min_keysize            = AES_MIN_KEY_SIZE,
                        .max_keysize            = AES_MAX_KEY_SIZE,
                        .ivsize                 = AES_BLOCK_SIZE,
                        .setkey                 = chcr_aes_ctr_setkey,
                        .encrypt                = chcr_aes_encrypt,
                        .decrypt                = chcr_aes_decrypt,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_SKCIPHER |
                        CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
                .is_registered = 0,
                .alg.skcipher = {
                        .base.cra_name          = "rfc3686(ctr(aes))",
                        .base.cra_driver_name   = "rfc3686-ctr-aes-chcr",
                        .base.cra_blocksize     = 1,

                        .init                   = chcr_rfc3686_init,
                        .exit                   = chcr_exit_tfm,
                        .min_keysize            = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
                        .max_keysize            = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
                        .ivsize                 = CTR_RFC3686_IV_SIZE,
                        .setkey                 = chcr_aes_rfc3686_setkey,
                        .encrypt                = chcr_aes_encrypt,
                        .decrypt                = chcr_aes_decrypt,
                }
        },
        /* SHA */
        {
                .type = CRYPTO_ALG_TYPE_AHASH,
                .is_registered = 0,
                .alg.hash = {
                        .halg.digestsize = SHA1_DIGEST_SIZE,
                        .halg.base = {
                                .cra_name = "sha1",
                                .cra_driver_name = "sha1-chcr",
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                        }
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AHASH,
                .is_registered = 0,
                .alg.hash = {
                        .halg.digestsize = SHA256_DIGEST_SIZE,
                        .halg.base = {
                                .cra_name = "sha256",
                                .cra_driver_name = "sha256-chcr",
                                .cra_blocksize = SHA256_BLOCK_SIZE,
                        }
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AHASH,
                .is_registered = 0,
                .alg.hash = {
                        .halg.digestsize = SHA224_DIGEST_SIZE,
                        .halg.base = {
                                .cra_name = "sha224",
                                .cra_driver_name = "sha224-chcr",
                                .cra_blocksize = SHA224_BLOCK_SIZE,
                        }
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AHASH,
                .is_registered = 0,
                .alg.hash = {
                        .halg.digestsize = SHA384_DIGEST_SIZE,
                        .halg.base = {
                                .cra_name = "sha384",
                                .cra_driver_name = "sha384-chcr",
                                .cra_blocksize = SHA384_BLOCK_SIZE,
                        }
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AHASH,
                .is_registered = 0,
                .alg.hash = {
                        .halg.digestsize = SHA512_DIGEST_SIZE,
                        .halg.base = {
                                .cra_name = "sha512",
                                .cra_driver_name = "sha512-chcr",
                                .cra_blocksize = SHA512_BLOCK_SIZE,
                        }
                }
        },
        /* HMAC */
        {
                .type = CRYPTO_ALG_TYPE_HMAC,
                .is_registered = 0,
                .alg.hash = {
                        .halg.digestsize = SHA1_DIGEST_SIZE,
                        .halg.base = {
                                .cra_name = "hmac(sha1)",
                                .cra_driver_name = "hmac-sha1-chcr",
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                        }
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_HMAC,
                .is_registered = 0,
                .alg.hash = {
                        .halg.digestsize = SHA224_DIGEST_SIZE,
                        .halg.base = {
                                .cra_name = "hmac(sha224)",
                                .cra_driver_name = "hmac-sha224-chcr",
                                .cra_blocksize = SHA224_BLOCK_SIZE,
                        }
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_HMAC,
                .is_registered = 0,
                .alg.hash = {
                        .halg.digestsize = SHA256_DIGEST_SIZE,
                        .halg.base = {
                                .cra_name = "hmac(sha256)",
                                .cra_driver_name = "hmac-sha256-chcr",
                                .cra_blocksize = SHA256_BLOCK_SIZE,
                        }
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_HMAC,
                .is_registered = 0,
                .alg.hash = {
                        .halg.digestsize = SHA384_DIGEST_SIZE,
                        .halg.base = {
                                .cra_name = "hmac(sha384)",
                                .cra_driver_name = "hmac-sha384-chcr",
                                .cra_blocksize = SHA384_BLOCK_SIZE,
                        }
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_HMAC,
                .is_registered = 0,
                .alg.hash = {
                        .halg.digestsize = SHA512_DIGEST_SIZE,
                        .halg.base = {
                                .cra_name = "hmac(sha512)",
                                .cra_driver_name = "hmac-sha512-chcr",
                                .cra_blocksize = SHA512_BLOCK_SIZE,
                        }
                }
        },
        /* AEAD algorithms */
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "gcm(aes)",
                                .cra_driver_name = "gcm-aes-chcr",
                                .cra_blocksize = 1,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_gcm_ctx),
                        },
                        .ivsize = GCM_AES_IV_SIZE,
                        .maxauthsize = GHASH_DIGEST_SIZE,
                        .setkey = chcr_gcm_setkey,
                        .setauthsize = chcr_gcm_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "rfc4106(gcm(aes))",
                                .cra_driver_name = "rfc4106-gcm-aes-chcr",
                                .cra_blocksize = 1,
                                .cra_priority = CHCR_AEAD_PRIORITY + 1,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_gcm_ctx),
                        },
                        .ivsize = GCM_RFC4106_IV_SIZE,
                        .maxauthsize = GHASH_DIGEST_SIZE,
                        .setkey = chcr_gcm_setkey,
                        .setauthsize = chcr_4106_4309_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "ccm(aes)",
                                .cra_driver_name = "ccm-aes-chcr",
                                .cra_blocksize = 1,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx),
                        },
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = GHASH_DIGEST_SIZE,
                        .setkey = chcr_aead_ccm_setkey,
                        .setauthsize = chcr_ccm_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "rfc4309(ccm(aes))",
                                .cra_driver_name = "rfc4309-ccm-aes-chcr",
                                .cra_blocksize = 1,
                                .cra_priority = CHCR_AEAD_PRIORITY + 1,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx),
                        },
                        .ivsize = 8,
                        .maxauthsize = GHASH_DIGEST_SIZE,
                        .setkey = chcr_aead_rfc4309_setkey,
                        .setauthsize = chcr_4106_4309_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha1),cbc(aes))",
                                .cra_driver_name =
                                        "authenc-hmac-sha1-cbc-aes-chcr",
                                .cra_blocksize = AES_BLOCK_SIZE,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                        .setkey = chcr_authenc_setkey,
                        .setauthsize = chcr_authenc_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha256),cbc(aes))",
                                .cra_driver_name =
                                        "authenc-hmac-sha256-cbc-aes-chcr",
                                .cra_blocksize = AES_BLOCK_SIZE,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                        .setkey = chcr_authenc_setkey,
                        .setauthsize = chcr_authenc_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha224),cbc(aes))",
                                .cra_driver_name =
                                        "authenc-hmac-sha224-cbc-aes-chcr",
                                .cra_blocksize = AES_BLOCK_SIZE,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA224_DIGEST_SIZE,
                        .setkey = chcr_authenc_setkey,
                        .setauthsize = chcr_authenc_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha384),cbc(aes))",
                                .cra_driver_name =
                                        "authenc-hmac-sha384-cbc-aes-chcr",
                                .cra_blocksize = AES_BLOCK_SIZE,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
                        .setkey = chcr_authenc_setkey,
                        .setauthsize = chcr_authenc_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha512),cbc(aes))",
                                .cra_driver_name =
                                        "authenc-hmac-sha512-cbc-aes-chcr",
                                .cra_blocksize = AES_BLOCK_SIZE,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                        .setkey = chcr_authenc_setkey,
                        .setauthsize = chcr_authenc_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(digest_null,cbc(aes))",
                                .cra_driver_name =
                                        "authenc-digest_null-cbc-aes-chcr",
                                .cra_blocksize = AES_BLOCK_SIZE,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = 0,
                        .setkey = chcr_aead_digest_null_setkey,
                        .setauthsize = chcr_authenc_null_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
                                .cra_driver_name =
                                        "authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
                                .cra_blocksize = 1,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                        .setkey = chcr_authenc_setkey,
                        .setauthsize = chcr_authenc_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
                                .cra_driver_name =
                                        "authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
                                .cra_blocksize = 1,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                        .setkey = chcr_authenc_setkey,
                        .setauthsize = chcr_authenc_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
                                .cra_driver_name =
                                        "authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
                                .cra_blocksize = 1,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA224_DIGEST_SIZE,
                        .setkey = chcr_authenc_setkey,
                        .setauthsize = chcr_authenc_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
                                .cra_driver_name =
                                        "authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
                                .cra_blocksize = 1,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
                        .setkey = chcr_authenc_setkey,
                        .setauthsize = chcr_authenc_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
                                .cra_driver_name =
                                        "authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
                                .cra_blocksize = 1,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                        .setkey = chcr_authenc_setkey,
                        .setauthsize = chcr_authenc_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
                                .cra_driver_name =
                                        "authenc-digest_null-rfc3686-ctr-aes-chcr",
                                .cra_blocksize = 1,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = 0,
                        .setkey = chcr_aead_digest_null_setkey,
                        .setauthsize = chcr_authenc_null_setauthsize,
                }
        },
};

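/*
 * Illustrative sketch (not part of the driver): kernel consumers reach
 * the table above through the generic crypto API by cra_name, and the
 * allocator picks the highest-priority provider, so "sha256" resolves
 * to "sha256-chcr" when its cra_priority wins. The function name below
 * is hypothetical.
 */
static int __maybe_unused chcr_example_get_sha256(void)
{
        struct crypto_ahash *tfm;

        /* Look up by generic name; cra_priority decides the backend. */
        tfm = crypto_alloc_ahash("sha256", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        pr_info("sha256 backed by %s\n",
                crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm)));
        crypto_free_ahash(tfm);
        return 0;
}
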
/*
 *      chcr_unregister_alg - Deregister crypto algorithms with
 *      the kernel framework.
 */
static int chcr_unregister_alg(void)
{
        int i;

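        /*
         * Only drop an algorithm whose sole remaining reference is the
         * registration itself (cra_refcnt == 1); live tfms still hold
         * references, so those algorithms must stay registered.
         */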
        for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
                switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
                case CRYPTO_ALG_TYPE_SKCIPHER:
                        if (driver_algs[i].is_registered &&
                            refcount_read(&driver_algs[i].alg.skcipher.base.cra_refcnt) == 1) {
                                crypto_unregister_skcipher(&driver_algs[i].alg.skcipher);
                                driver_algs[i].is_registered = 0;
                        }
                        break;
                case CRYPTO_ALG_TYPE_AEAD:
                        if (driver_algs[i].is_registered &&
                            refcount_read(&driver_algs[i].alg.aead.base.cra_refcnt) == 1) {
                                crypto_unregister_aead(&driver_algs[i].alg.aead);
                                driver_algs[i].is_registered = 0;
                        }
                        break;
                case CRYPTO_ALG_TYPE_AHASH:
                        if (driver_algs[i].is_registered &&
                            refcount_read(&driver_algs[i].alg.hash.halg.base.cra_refcnt) == 1) {
                                crypto_unregister_ahash(&driver_algs[i].alg.hash);
                                driver_algs[i].is_registered = 0;
                        }
                        break;
                }
        }
        return 0;
}

#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
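/* SZ_AHASH_REQ_CTX doubles as the ahash ->statesize below, sized so
 * chcr_ahash_export()/chcr_ahash_import() can carry the whole request
 * context.
 */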

/*
 *      chcr_register_alg - Register crypto algorithms with the kernel
 *      framework.
 */
static int chcr_register_alg(void)
{
        struct ahash_alg *a_hash;
        int err = 0, i;
        char *name = NULL;

        for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
                if (driver_algs[i].is_registered)
                        continue;
                switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
                case CRYPTO_ALG_TYPE_SKCIPHER:
                        driver_algs[i].alg.skcipher.base.cra_priority =
                                CHCR_CRA_PRIORITY;
                        driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE;
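                        /*
                         * ASYNC: requests may complete via a callback;
                         * ALLOCATES_MEMORY: the request path can allocate;
                         * NEED_FALLBACK: requests the hardware cannot
                         * handle are punted to a software fallback tfm.
                         */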
                        driver_algs[i].alg.skcipher.base.cra_flags =
                                CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
                                CRYPTO_ALG_ALLOCATES_MEMORY |
                                CRYPTO_ALG_NEED_FALLBACK;
                        driver_algs[i].alg.skcipher.base.cra_ctxsize =
                                sizeof(struct chcr_context) +
                                sizeof(struct ablk_ctx);
                        driver_algs[i].alg.skcipher.base.cra_alignmask = 0;

                        err = crypto_register_skcipher(&driver_algs[i].alg.skcipher);
                        name = driver_algs[i].alg.skcipher.base.cra_driver_name;
                        break;
                case CRYPTO_ALG_TYPE_AEAD:
                        driver_algs[i].alg.aead.base.cra_flags =
                                CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
                                CRYPTO_ALG_ALLOCATES_MEMORY;
                        driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
                        driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
                        driver_algs[i].alg.aead.init = chcr_aead_cra_init;
                        driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
                        driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
                        err = crypto_register_aead(&driver_algs[i].alg.aead);
                        name = driver_algs[i].alg.aead.base.cra_driver_name;
                        break;
                case CRYPTO_ALG_TYPE_AHASH:
                        a_hash = &driver_algs[i].alg.hash;
                        a_hash->update = chcr_ahash_update;
                        a_hash->final = chcr_ahash_final;
                        a_hash->finup = chcr_ahash_finup;
                        a_hash->digest = chcr_ahash_digest;
                        a_hash->export = chcr_ahash_export;
                        a_hash->import = chcr_ahash_import;
                        a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
                        a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
                        a_hash->halg.base.cra_module = THIS_MODULE;
                        a_hash->halg.base.cra_flags =
                                CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
                        a_hash->halg.base.cra_alignmask = 0;
                        a_hash->halg.base.cra_exit = NULL;

                        if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
                                a_hash->halg.base.cra_init = chcr_hmac_cra_init;
                                a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
                                a_hash->init = chcr_hmac_init;
                                a_hash->setkey = chcr_ahash_setkey;
                                a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
                        } else {
                                a_hash->init = chcr_sha_init;
                                a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
                                a_hash->halg.base.cra_init = chcr_sha_cra_init;
                        }
                        err = crypto_register_ahash(&driver_algs[i].alg.hash);
                        name = a_hash->halg.base.cra_driver_name;
                        break;
                }
                if (err) {
                        pr_err("%s: Algorithm registration failed\n", name);
                        goto register_err;
                } else {
                        driver_algs[i].is_registered = 1;
                }
        }
        return 0;

register_err:
        chcr_unregister_alg();
        return err;
}
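
/*
 * Illustrative sketch (not part of the driver): once registration has
 * succeeded, "ctr(aes)" can be allocated like any other skcipher. The
 * algorithms are CRYPTO_ALG_ASYNC, so encrypt/decrypt may return
 * -EINPROGRESS; crypto_wait_req() parks the caller until the completion
 * callback fires. The function name and the all-zero key/IV are made up
 * for the example; the buffers are static because the hardware may DMA
 * to and from them, so they must not live on the stack.
 */
static int __maybe_unused chcr_example_ctr_aes(void)
{
        static const u8 key[AES_MIN_KEY_SIZE];
        static u8 buf[AES_BLOCK_SIZE], iv[AES_BLOCK_SIZE];
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        DECLARE_CRYPTO_WAIT(wait);
        struct scatterlist sg;
        int err;

        tfm = crypto_alloc_skcipher("ctr(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_skcipher_setkey(tfm, key, sizeof(key));
        if (err)
                goto out_free_tfm;

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out_free_tfm;
        }

        sg_init_one(&sg, buf, sizeof(buf));
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      crypto_req_done, &wait);
        skcipher_request_set_crypt(req, &sg, &sg, sizeof(buf), iv);

        /* ASYNC algorithms may return -EINPROGRESS; wait for the callback. */
        err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

        skcipher_request_free(req);
out_free_tfm:
        crypto_free_skcipher(tfm);
        return err;
}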

/*
 *      start_crypto - Register the crypto algorithms.
 *      This should be called once when the first device comes up. After
 *      this, the kernel will start calling the driver APIs for crypto
 *      operations.
 */
int start_crypto(void)
{
        return chcr_register_alg();
}

/*
 *      stop_crypto - Deregister all the crypto algorithms with the kernel.
 *      This should be called once when the last device goes down. After
 *      this, the kernel will not call the driver APIs for crypto
 *      operations.
 */
int stop_crypto(void)
{
        chcr_unregister_alg();
        return 0;
}