drivers/crypto/ccree/cc_aead.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include <linux/kernel.h>
#include <linux/module.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <linux/rtnetlink.h>
#include "cc_driver.h"
#include "cc_buffer_mgr.h"
#include "cc_aead.h"
#include "cc_request_mgr.h"
#include "cc_hash.h"
#include "cc_sram_mgr.h"

#define template_aead   template_u.aead

#define MAX_AEAD_SETKEY_SEQ 12
#define MAX_AEAD_PROCESS_SEQ 23

#define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
#define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)

#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE

struct cc_aead_handle {
        cc_sram_addr_t sram_workspace_addr;
        struct list_head aead_list;
};

struct cc_hmac_s {
        u8 *padded_authkey;
        u8 *ipad_opad; /* IPAD, OPAD */
        dma_addr_t padded_authkey_dma_addr;
        dma_addr_t ipad_opad_dma_addr;
};

struct cc_xcbc_s {
        u8 *xcbc_keys; /* K1, K2, K3 */
        dma_addr_t xcbc_keys_dma_addr;
};

struct cc_aead_ctx {
        struct cc_drvdata *drvdata;
        u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 IV and AES-CCM */
        u8 *enckey;
        dma_addr_t enckey_dma_addr;
        union {
                struct cc_hmac_s hmac;
                struct cc_xcbc_s xcbc;
        } auth_state;
        unsigned int enc_keylen;
        unsigned int auth_keylen;
        unsigned int authsize; /* Actual (possibly reduced) size of the MAC/ICV */
        unsigned int hash_len;
        enum drv_cipher_mode cipher_mode;
        enum cc_flow_mode flow_mode;
        enum drv_hash_mode auth_mode;
};

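/* Associated data of exactly 16 or 20 bytes (as in IPsec ESP, with or
 * without extended sequence numbers) is all the RFC 4309/4543-style
 * request paths accept.
 */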
static inline bool valid_assoclen(struct aead_request *req)
{
        return ((req->assoclen == 16) || (req->assoclen == 20));
}

static void cc_aead_exit(struct crypto_aead *tfm)
{
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        dev_dbg(dev, "Clearing context @%p for %s\n", crypto_aead_ctx(tfm),
                crypto_tfm_alg_name(&tfm->base));

        /* Unmap enckey buffer */
        if (ctx->enckey) {
                dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey,
                                  ctx->enckey_dma_addr);
                dev_dbg(dev, "Freed enckey DMA buffer enckey_dma_addr=%pad\n",
                        &ctx->enckey_dma_addr);
                ctx->enckey_dma_addr = 0;
                ctx->enckey = NULL;
        }

        if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
                struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;

                if (xcbc->xcbc_keys) {
                        dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
                                          xcbc->xcbc_keys,
                                          xcbc->xcbc_keys_dma_addr);
                }
                dev_dbg(dev, "Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n",
                        &xcbc->xcbc_keys_dma_addr);
                xcbc->xcbc_keys_dma_addr = 0;
                xcbc->xcbc_keys = NULL;
        } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
                struct cc_hmac_s *hmac = &ctx->auth_state.hmac;

                if (hmac->ipad_opad) {
                        dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
                                          hmac->ipad_opad,
                                          hmac->ipad_opad_dma_addr);
                        dev_dbg(dev, "Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n",
                                &hmac->ipad_opad_dma_addr);
                        hmac->ipad_opad_dma_addr = 0;
                        hmac->ipad_opad = NULL;
                }
                if (hmac->padded_authkey) {
                        dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
                                          hmac->padded_authkey,
                                          hmac->padded_authkey_dma_addr);
                        dev_dbg(dev, "Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n",
                                &hmac->padded_authkey_dma_addr);
                        hmac->padded_authkey_dma_addr = 0;
                        hmac->padded_authkey = NULL;
                }
        }
}

static unsigned int cc_get_aead_hash_len(struct crypto_aead *tfm)
{
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);

        return cc_get_default_hash_len(ctx->drvdata);
}

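/* One-time transform setup: copy the modes from the algorithm template
 * into the context and allocate the DMA-coherent buffers that the
 * setkey descriptor sequences will consume.
 */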
static int cc_aead_init(struct crypto_aead *tfm)
{
        struct aead_alg *alg = crypto_aead_alg(tfm);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct cc_crypto_alg *cc_alg =
                        container_of(alg, struct cc_crypto_alg, aead_alg);
        struct device *dev = drvdata_to_dev(cc_alg->drvdata);

        dev_dbg(dev, "Initializing context @%p for %s\n", ctx,
                crypto_tfm_alg_name(&tfm->base));

        /* Initialize modes in instance */
        ctx->cipher_mode = cc_alg->cipher_mode;
        ctx->flow_mode = cc_alg->flow_mode;
        ctx->auth_mode = cc_alg->auth_mode;
        ctx->drvdata = cc_alg->drvdata;
        crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));

        /* Allocate key buffer, cache line aligned */
        ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
                                         &ctx->enckey_dma_addr, GFP_KERNEL);
        if (!ctx->enckey) {
                dev_err(dev, "Failed allocating key buffer\n");
                goto init_failed;
        }
        dev_dbg(dev, "Allocated enckey buffer in context ctx->enckey=@%p\n",
                ctx->enckey);

        /* Set default authlen value */

        if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
                struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
                const unsigned int key_size = CC_AES_128_BIT_KEY_SIZE * 3;

                /* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */
                /* (and temporary for user key - up to 256b) */
                xcbc->xcbc_keys = dma_alloc_coherent(dev, key_size,
                                                     &xcbc->xcbc_keys_dma_addr,
                                                     GFP_KERNEL);
                if (!xcbc->xcbc_keys) {
                        dev_err(dev, "Failed allocating buffer for XCBC keys\n");
                        goto init_failed;
                }
        } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
                struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
                const unsigned int digest_size = 2 * MAX_HMAC_DIGEST_SIZE;
                dma_addr_t *pkey_dma = &hmac->padded_authkey_dma_addr;

                /* Allocate dma-coherent buffer for IPAD + OPAD */
                hmac->ipad_opad = dma_alloc_coherent(dev, digest_size,
                                                     &hmac->ipad_opad_dma_addr,
                                                     GFP_KERNEL);

                if (!hmac->ipad_opad) {
                        dev_err(dev, "Failed allocating IPAD/OPAD buffer\n");
                        goto init_failed;
                }

                dev_dbg(dev, "Allocated authkey buffer in context ctx->authkey=@%p\n",
                        hmac->ipad_opad);

                hmac->padded_authkey = dma_alloc_coherent(dev,
                                                          MAX_HMAC_BLOCK_SIZE,
                                                          pkey_dma,
                                                          GFP_KERNEL);

                if (!hmac->padded_authkey) {
                        dev_err(dev, "failed to allocate padded_authkey\n");
                        goto init_failed;
                }
        } else {
                ctx->auth_state.hmac.ipad_opad = NULL;
                ctx->auth_state.hmac.padded_authkey = NULL;
        }
        ctx->hash_len = cc_get_aead_hash_len(tfm);

        return 0;

init_failed:
        cc_aead_exit(tfm);
        return -ENOMEM;
}

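/* Completion callback: unmap the request, then on decryption compare the
 * computed MAC against the received ICV (zeroing the plaintext on
 * mismatch); on encryption copy a generated IV back to the caller.
 */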
static void cc_aead_complete(struct device *dev, void *cc_req, int err)
{
        struct aead_request *areq = (struct aead_request *)cc_req;
        struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
        struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);

        /* BACKLOG notification */
        if (err == -EINPROGRESS)
                goto done;

        cc_unmap_aead_request(dev, areq);

        /* Restore ordinary iv pointer */
        areq->iv = areq_ctx->backup_iv;

        if (err)
                goto done;

        if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
                if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
                           ctx->authsize) != 0) {
                        dev_dbg(dev, "Payload authentication failure, (auth-size=%d, cipher=%d)\n",
                                ctx->authsize, ctx->cipher_mode);
                        /* In case of payload authentication failure, the
                         * decrypted message must not be revealed --> zero
                         * its memory.
                         */
                        cc_zero_sgl(areq->dst, areq_ctx->cryptlen);
                        err = -EBADMSG;
                }
        } else { /*ENCRYPT*/
                if (areq_ctx->is_icv_fragmented) {
                        u32 skip = areq->cryptlen + areq_ctx->dst_offset;

                        cc_copy_sg_portion(dev, areq_ctx->mac_buf,
                                           areq_ctx->dst_sgl, skip,
                                           (skip + ctx->authsize),
                                           CC_SG_FROM_BUF);
                }

                /* If an IV was generated, copy it back to the user provided
                 * buffer.
                 */
                if (areq_ctx->backup_giv) {
                        if (ctx->cipher_mode == DRV_CIPHER_CTR)
                                memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
                                       CTR_RFC3686_NONCE_SIZE,
                                       CTR_RFC3686_IV_SIZE);
                        else if (ctx->cipher_mode == DRV_CIPHER_CCM)
                                memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
                                       CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE);
                }
        }
done:
        aead_request_complete(areq, err);
}

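/* Build the descriptor sequence that derives the XCBC-MAC subkeys
 * K1/K2/K3 by encrypting the constants 0x01../0x02../0x03.. with the
 * user key. Returns the number of descriptors used.
 */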
static unsigned int xcbc_setkey(struct cc_hw_desc *desc,
                                struct cc_aead_ctx *ctx)
{
        /* Load the AES key */
        hw_desc_init(&desc[0]);
        /* We use the same buffer for the source/user key as for the
         * output keys, because after this key-load operation the user
         * key is not needed anymore.
         */
        set_din_type(&desc[0], DMA_DLLI,
                     ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen,
                     NS_BIT);
        set_cipher_mode(&desc[0], DRV_CIPHER_ECB);
        set_cipher_config0(&desc[0], DRV_CRYPTO_DIRECTION_ENCRYPT);
        set_key_size_aes(&desc[0], ctx->auth_keylen);
        set_flow_mode(&desc[0], S_DIN_to_AES);
        set_setup_mode(&desc[0], SETUP_LOAD_KEY0);

        hw_desc_init(&desc[1]);
        set_din_const(&desc[1], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[1], DIN_AES_DOUT);
        set_dout_dlli(&desc[1], ctx->auth_state.xcbc.xcbc_keys_dma_addr,
                      AES_KEYSIZE_128, NS_BIT, 0);

        hw_desc_init(&desc[2]);
        set_din_const(&desc[2], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[2], DIN_AES_DOUT);
        set_dout_dlli(&desc[2], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
                                         + AES_KEYSIZE_128),
                              AES_KEYSIZE_128, NS_BIT, 0);

        hw_desc_init(&desc[3]);
        set_din_const(&desc[3], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[3], DIN_AES_DOUT);
        set_dout_dlli(&desc[3], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
                                          + 2 * AES_KEYSIZE_128),
                              AES_KEYSIZE_128, NS_BIT, 0);

        return 4;
}

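/* Build the descriptors that precompute the HMAC ipad/opad intermediate
 * digests from the padded authentication key. Returns the number of
 * descriptors used.
 */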
static int hmac_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx)
{
        unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
        unsigned int digest_ofs = 0;
        unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
                        DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
        unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
                        CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
        struct cc_hmac_s *hmac = &ctx->auth_state.hmac;

        unsigned int idx = 0;
        int i;

        /* calc derived HMAC key */
        for (i = 0; i < 2; i++) {
                /* Load hash initial state */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], hash_mode);
                set_din_sram(&desc[idx],
                             cc_larval_digest_addr(ctx->drvdata,
                                                   ctx->auth_mode),
                             digest_size);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
                idx++;

                /* Load the hash current length */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], hash_mode);
                set_din_const(&desc[idx], 0, ctx->hash_len);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
                idx++;

                /* Prepare ipad/opad key */
                hw_desc_init(&desc[idx]);
                set_xor_val(&desc[idx], hmac_pad_const[i]);
                set_cipher_mode(&desc[idx], hash_mode);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
                idx++;

                /* Perform HASH update */
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_DLLI,
                             hmac->padded_authkey_dma_addr,
                             SHA256_BLOCK_SIZE, NS_BIT);
                set_cipher_mode(&desc[idx], hash_mode);
                set_xor_active(&desc[idx]);
                set_flow_mode(&desc[idx], DIN_HASH);
                idx++;

                /* Get the digest */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], hash_mode);
                set_dout_dlli(&desc[idx],
                              (hmac->ipad_opad_dma_addr + digest_ofs),
                              digest_size, NS_BIT, 0);
                set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
                set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
                idx++;

                digest_ofs += digest_size;
        }

        return idx;
}

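/* Sanity-check the cipher and authentication key sizes against the
 * configured cipher/auth modes.
 */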
static int validate_keys_sizes(struct cc_aead_ctx *ctx)
{
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        dev_dbg(dev, "enc_keylen=%u  authkeylen=%u\n",
                ctx->enc_keylen, ctx->auth_keylen);

        switch (ctx->auth_mode) {
        case DRV_HASH_SHA1:
        case DRV_HASH_SHA256:
                break;
        case DRV_HASH_XCBC_MAC:
                if (ctx->auth_keylen != AES_KEYSIZE_128 &&
                    ctx->auth_keylen != AES_KEYSIZE_192 &&
                    ctx->auth_keylen != AES_KEYSIZE_256)
                        return -ENOTSUPP;
                break;
        case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth_key */
                if (ctx->auth_keylen > 0)
                        return -EINVAL;
                break;
        default:
                dev_err(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
                return -EINVAL;
        }
        /* Check cipher key size */
        if (ctx->flow_mode == S_DIN_to_DES) {
                if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
                        dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
                                ctx->enc_keylen);
                        return -EINVAL;
                }
        } else { /* Default assumed to be AES ciphers */
                if (ctx->enc_keylen != AES_KEYSIZE_128 &&
                    ctx->enc_keylen != AES_KEYSIZE_192 &&
                    ctx->enc_keylen != AES_KEYSIZE_256) {
                        dev_err(dev, "Invalid cipher(AES) key size: %u\n",
                                ctx->enc_keylen);
                        return -EINVAL;
                }
        }

        return 0; /* All key-size checks passed */
}

/* This function prepares the user key for the HMAC processing
 * (copies it to an internal buffer, or hashes it first if the key is
 * longer than the hash block size).
 */
static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
                                 unsigned int keylen)
{
        dma_addr_t key_dma_addr = 0;
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);
        u32 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->auth_mode);
        struct cc_crypto_req cc_req = {};
        unsigned int blocksize;
        unsigned int digestsize;
        unsigned int hashmode;
        unsigned int idx = 0;
        int rc = 0;
        u8 *key = NULL;
        struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
        dma_addr_t padded_authkey_dma_addr =
                ctx->auth_state.hmac.padded_authkey_dma_addr;

        switch (ctx->auth_mode) { /* auth_key required and >0 */
        case DRV_HASH_SHA1:
                blocksize = SHA1_BLOCK_SIZE;
                digestsize = SHA1_DIGEST_SIZE;
                hashmode = DRV_HASH_HW_SHA1;
                break;
        case DRV_HASH_SHA256:
        default:
                blocksize = SHA256_BLOCK_SIZE;
                digestsize = SHA256_DIGEST_SIZE;
                hashmode = DRV_HASH_HW_SHA256;
        }

        if (keylen != 0) {

                key = kmemdup(authkey, keylen, GFP_KERNEL);
                if (!key)
                        return -ENOMEM;

                key_dma_addr = dma_map_single(dev, (void *)key, keylen,
                                              DMA_TO_DEVICE);
                if (dma_mapping_error(dev, key_dma_addr)) {
                        dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
                                key, keylen);
                        kzfree(key);
                        return -ENOMEM;
                }
                if (keylen > blocksize) {
                        /* Load hash initial state */
                        hw_desc_init(&desc[idx]);
                        set_cipher_mode(&desc[idx], hashmode);
                        set_din_sram(&desc[idx], larval_addr, digestsize);
                        set_flow_mode(&desc[idx], S_DIN_to_HASH);
                        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
                        idx++;

                        /* Load the hash current length */
                        hw_desc_init(&desc[idx]);
                        set_cipher_mode(&desc[idx], hashmode);
                        set_din_const(&desc[idx], 0, ctx->hash_len);
                        set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
                        set_flow_mode(&desc[idx], S_DIN_to_HASH);
                        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
                        idx++;

                        hw_desc_init(&desc[idx]);
                        set_din_type(&desc[idx], DMA_DLLI,
                                     key_dma_addr, keylen, NS_BIT);
                        set_flow_mode(&desc[idx], DIN_HASH);
                        idx++;

                        /* Get hashed key */
                        hw_desc_init(&desc[idx]);
                        set_cipher_mode(&desc[idx], hashmode);
                        set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
                                      digestsize, NS_BIT, 0);
                        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                        set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
                        set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
                        set_cipher_config0(&desc[idx],
                                           HASH_DIGEST_RESULT_LITTLE_ENDIAN);
                        idx++;

                        hw_desc_init(&desc[idx]);
                        set_din_const(&desc[idx], 0, (blocksize - digestsize));
                        set_flow_mode(&desc[idx], BYPASS);
                        set_dout_dlli(&desc[idx], (padded_authkey_dma_addr +
                                      digestsize), (blocksize - digestsize),
                                      NS_BIT, 0);
                        idx++;
                } else {
                        hw_desc_init(&desc[idx]);
                        set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
                                     keylen, NS_BIT);
                        set_flow_mode(&desc[idx], BYPASS);
                        set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
                                      keylen, NS_BIT, 0);
                        idx++;

                        if ((blocksize - keylen) != 0) {
                                hw_desc_init(&desc[idx]);
                                set_din_const(&desc[idx], 0,
                                              (blocksize - keylen));
                                set_flow_mode(&desc[idx], BYPASS);
                                set_dout_dlli(&desc[idx],
                                              (padded_authkey_dma_addr +
                                               keylen),
                                              (blocksize - keylen), NS_BIT, 0);
                                idx++;
                        }
                }
        } else {
                hw_desc_init(&desc[idx]);
                set_din_const(&desc[idx], 0, (blocksize - keylen));
                set_flow_mode(&desc[idx], BYPASS);
                set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
                              blocksize, NS_BIT, 0);
                idx++;
        }

        rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
        if (rc)
                dev_err(dev, "send_request() failed (rc=%d)\n", rc);

        if (key_dma_addr)
                dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);

        kzfree(key);

        return rc;
}

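/* Split an authenc() key blob into its cipher and authentication parts,
 * copy them into the context buffers and run the HW sequence that
 * derives the per-key authentication state.
 */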
static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
                          unsigned int keylen)
{
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct cc_crypto_req cc_req = {};
        struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
        unsigned int seq_len = 0;
        struct device *dev = drvdata_to_dev(ctx->drvdata);
        const u8 *enckey, *authkey;
        int rc;

        dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
                ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);

        /* STAT_PHASE_0: Init and sanity checks */

        if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
                struct crypto_authenc_keys keys;

                rc = crypto_authenc_extractkeys(&keys, key, keylen);
                if (rc)
                        goto badkey;
                enckey = keys.enckey;
                authkey = keys.authkey;
                ctx->enc_keylen = keys.enckeylen;
                ctx->auth_keylen = keys.authkeylen;

                if (ctx->cipher_mode == DRV_CIPHER_CTR) {
                        /* the nonce is stored in the last bytes of the key */
                        rc = -EINVAL;
                        if (ctx->enc_keylen <
                            (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
                                goto badkey;
                        /* Copy nonce from last 4 bytes in CTR key to
                         * first 4 bytes in CTR IV
                         */
                        memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen -
                               CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
                        /* Set CTR key size */
                        ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
                }
        } else { /* non-authenc - has just one key */
                enckey = key;
                authkey = NULL;
                ctx->enc_keylen = keylen;
                ctx->auth_keylen = 0;
        }

        rc = validate_keys_sizes(ctx);
        if (rc)
                goto badkey;

        /* STAT_PHASE_1: Copy key to ctx */

        /* Get key material */
        memcpy(ctx->enckey, enckey, ctx->enc_keylen);
        if (ctx->enc_keylen == 24)
                memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
        if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
                memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey,
                       ctx->auth_keylen);
        } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
                rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
                if (rc)
                        goto badkey;
        }

        /* STAT_PHASE_2: Create sequence */

        switch (ctx->auth_mode) {
        case DRV_HASH_SHA1:
        case DRV_HASH_SHA256:
                seq_len = hmac_setkey(desc, ctx);
                break;
        case DRV_HASH_XCBC_MAC:
                seq_len = xcbc_setkey(desc, ctx);
                break;
        case DRV_HASH_NULL: /* non-authenc modes, e.g., CCM */
                break; /* No auth. key setup */
        default:
                dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
                rc = -ENOTSUPP;
                goto badkey;
        }

        /* STAT_PHASE_3: Submit sequence to HW */

        if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
                rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
                if (rc) {
                        dev_err(dev, "send_request() failed (rc=%d)\n", rc);
                        goto setkey_error;
                }
        }

        /* Update STAT_PHASE_3 */
        return rc;

badkey:
        crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);

setkey_error:
        return rc;
}

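/* Same as cc_aead_setkey(), but reject weak/degenerate 3DES cipher keys
 * first.
 */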
static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
                               unsigned int keylen)
{
        struct crypto_authenc_keys keys;
        u32 flags;
        int err;

        err = crypto_authenc_extractkeys(&keys, key, keylen);
        if (unlikely(err))
                goto badkey;

        err = -EINVAL;
        if (keys.enckeylen != DES3_EDE_KEY_SIZE)
                goto badkey;

        flags = crypto_aead_get_flags(aead);
        err = __des3_verify_key(&flags, keys.enckey);
        if (unlikely(err)) {
                crypto_aead_set_flags(aead, flags);
                goto out;
        }

        err = cc_aead_setkey(aead, key, keylen);

out:
        memzero_explicit(&keys, sizeof(keys));
        return err;

badkey:
        crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
        goto out;
}

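/* RFC 4309: the last 3 bytes of the key blob carry the CCM nonce (salt)
 * rather than cipher key material.
 */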
static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
                                 unsigned int keylen)
{
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);

        if (keylen < 3)
                return -EINVAL;

        keylen -= 3;
        memcpy(ctx->ctr_nonce, key + keylen, 3);

        return cc_aead_setkey(tfm, key, keylen);
}

static int cc_aead_setauthsize(struct crypto_aead *authenc,
                               unsigned int authsize)
{
        struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        /* Unsupported auth. sizes */
        if (authsize == 0 ||
            authsize > crypto_aead_maxauthsize(authenc)) {
                return -ENOTSUPP;
        }

        ctx->authsize = authsize;
        dev_dbg(dev, "authlen=%d\n", ctx->authsize);

        return 0;
}

static int cc_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
                                      unsigned int authsize)
{
        switch (authsize) {
        case 8:
        case 12:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return cc_aead_setauthsize(authenc, authsize);
}

static int cc_ccm_setauthsize(struct crypto_aead *authenc,
                              unsigned int authsize)
{
        switch (authsize) {
        case 4:
        case 6:
        case 8:
        case 10:
        case 12:
        case 14:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return cc_aead_setauthsize(authenc, authsize);
}

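/* Queue a descriptor that feeds the associated data (DLLI or MLLI
 * mapped) into the given flow.
 */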
static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
                              struct cc_hw_desc desc[], unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
        enum cc_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
        unsigned int idx = *seq_size;
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        switch (assoc_dma_type) {
        case CC_DMA_BUF_DLLI:
                dev_dbg(dev, "ASSOC buffer type DLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
                             areq_ctx->assoclen, NS_BIT);
                set_flow_mode(&desc[idx], flow_mode);
                if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
                    areq_ctx->cryptlen > 0)
                        set_din_not_last_indication(&desc[idx]);
                break;
        case CC_DMA_BUF_MLLI:
                dev_dbg(dev, "ASSOC buffer type MLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr,
                             areq_ctx->assoc.mlli_nents, NS_BIT);
                set_flow_mode(&desc[idx], flow_mode);
                if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
                    areq_ctx->cryptlen > 0)
                        set_din_not_last_indication(&desc[idx]);
                break;
        case CC_DMA_BUF_NULL:
        default:
                dev_err(dev, "Invalid ASSOC buffer type\n");
        }

        *seq_size = (++idx);
}

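/* Queue the descriptor that streams the text to be authenticated into
 * the hash/MAC engine (for double-pass MLLI flows a single table covers
 * assoc. data + IV + payload).
 */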
static void cc_proc_authen_desc(struct aead_request *areq,
                                unsigned int flow_mode,
                                struct cc_hw_desc desc[],
                                unsigned int *seq_size, int direct)
{
        struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
        enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
        unsigned int idx = *seq_size;
        struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        switch (data_dma_type) {
        case CC_DMA_BUF_DLLI:
        {
                struct scatterlist *cipher =
                        (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
                        areq_ctx->dst_sgl : areq_ctx->src_sgl;

                unsigned int offset =
                        (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
                        areq_ctx->dst_offset : areq_ctx->src_offset;
                dev_dbg(dev, "AUTHENC: SRC/DST buffer type DLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_DLLI,
                             (sg_dma_address(cipher) + offset),
                             areq_ctx->cryptlen, NS_BIT);
                set_flow_mode(&desc[idx], flow_mode);
                break;
        }
        case CC_DMA_BUF_MLLI:
        {
                /* DOUBLE-PASS flow (the default):
                 * assoc. + iv + data are compacted into one table.
                 * If assoclen is zero, only the IV is processed.
                 */
                cc_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
                u32 mlli_nents = areq_ctx->assoc.mlli_nents;

                if (areq_ctx->is_single_pass) {
                        if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
                                mlli_addr = areq_ctx->dst.sram_addr;
                                mlli_nents = areq_ctx->dst.mlli_nents;
                        } else {
                                mlli_addr = areq_ctx->src.sram_addr;
                                mlli_nents = areq_ctx->src.mlli_nents;
                        }
                }

                dev_dbg(dev, "AUTHENC: SRC/DST buffer type MLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_MLLI, mlli_addr, mlli_nents,
                             NS_BIT);
                set_flow_mode(&desc[idx], flow_mode);
                break;
        }
        case CC_DMA_BUF_NULL:
        default:
                dev_err(dev, "AUTHENC: Invalid SRC/DST buffer type\n");
        }

        *seq_size = (++idx);
}

static void cc_proc_cipher_desc(struct aead_request *areq,
                                unsigned int flow_mode,
                                struct cc_hw_desc desc[],
                                unsigned int *seq_size)
{
        unsigned int idx = *seq_size;
        struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
        enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
        struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        if (areq_ctx->cryptlen == 0)
                return; /* null processing */

        switch (data_dma_type) {
        case CC_DMA_BUF_DLLI:
                dev_dbg(dev, "CIPHER: SRC/DST buffer type DLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_DLLI,
                             (sg_dma_address(areq_ctx->src_sgl) +
                              areq_ctx->src_offset), areq_ctx->cryptlen,
                              NS_BIT);
                set_dout_dlli(&desc[idx],
                              (sg_dma_address(areq_ctx->dst_sgl) +
                               areq_ctx->dst_offset),
                              areq_ctx->cryptlen, NS_BIT, 0);
                set_flow_mode(&desc[idx], flow_mode);
                break;
        case CC_DMA_BUF_MLLI:
                dev_dbg(dev, "CIPHER: SRC/DST buffer type MLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr,
                             areq_ctx->src.mlli_nents, NS_BIT);
                set_dout_mlli(&desc[idx], areq_ctx->dst.sram_addr,
                              areq_ctx->dst.mlli_nents, NS_BIT, 0);
                set_flow_mode(&desc[idx], flow_mode);
                break;
        case CC_DMA_BUF_NULL:
        default:
                dev_err(dev, "CIPHER: Invalid SRC/DST buffer type\n");
        }

        *seq_size = (++idx);
}

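/* Final ICV handling: on encryption write the computed ICV to its
 * mapped destination; on decryption dump it into mac_buf for the
 * comparison performed in cc_aead_complete().
 */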
static void cc_proc_digest_desc(struct aead_request *req,
                                struct cc_hw_desc desc[],
                                unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
        unsigned int idx = *seq_size;
        unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
                                DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
        int direct = req_ctx->gen_ctx.op_type;

        /* Get final ICV result */
        if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
                hw_desc_init(&desc[idx]);
                set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
                set_dout_dlli(&desc[idx], req_ctx->icv_dma_addr, ctx->authsize,
                              NS_BIT, 1);
                set_queue_last_ind(ctx->drvdata, &desc[idx]);
                if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
                        set_aes_not_hash_mode(&desc[idx]);
                        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
                } else {
                        set_cipher_config0(&desc[idx],
                                           HASH_DIGEST_RESULT_LITTLE_ENDIAN);
                        set_cipher_mode(&desc[idx], hash_mode);
                }
        } else { /*Decrypt*/
                /* Get ICV out from hardware */
                hw_desc_init(&desc[idx]);
                set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
                set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr,
                              ctx->authsize, NS_BIT, 1);
                set_queue_last_ind(ctx->drvdata, &desc[idx]);
                set_cipher_config0(&desc[idx],
                                   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
                set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
                if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
                        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
                        set_aes_not_hash_mode(&desc[idx]);
                } else {
                        set_cipher_mode(&desc[idx], hash_mode);
                }
        }

        *seq_size = (++idx);
}

static void cc_set_cipher_desc(struct aead_request *req,
                               struct cc_hw_desc desc[],
                               unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
        unsigned int hw_iv_size = req_ctx->hw_iv_size;
        unsigned int idx = *seq_size;
        int direct = req_ctx->gen_ctx.op_type;

        /* Setup cipher state */
        hw_desc_init(&desc[idx]);
        set_cipher_config0(&desc[idx], direct);
        set_flow_mode(&desc[idx], ctx->flow_mode);
        set_din_type(&desc[idx], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
                     hw_iv_size, NS_BIT);
        if (ctx->cipher_mode == DRV_CIPHER_CTR)
                set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
        else
                set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
        set_cipher_mode(&desc[idx], ctx->cipher_mode);
        idx++;

        /* Setup enc. key */
        hw_desc_init(&desc[idx]);
        set_cipher_config0(&desc[idx], direct);
        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
        set_flow_mode(&desc[idx], ctx->flow_mode);
        if (ctx->flow_mode == S_DIN_to_AES) {
                set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
                             ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
                              ctx->enc_keylen), NS_BIT);
                set_key_size_aes(&desc[idx], ctx->enc_keylen);
        } else {
                set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
                             ctx->enc_keylen, NS_BIT);
                set_key_size_des(&desc[idx], ctx->enc_keylen);
        }
        set_cipher_mode(&desc[idx], ctx->cipher_mode);
        idx++;

        *seq_size = idx;
}

static void cc_proc_cipher(struct aead_request *req, struct cc_hw_desc desc[],
                           unsigned int *seq_size, unsigned int data_flow_mode)
{
        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
        int direct = req_ctx->gen_ctx.op_type;
        unsigned int idx = *seq_size;

        if (req_ctx->cryptlen == 0)
                return; /* null processing */

        cc_set_cipher_desc(req, desc, &idx);
        cc_proc_cipher_desc(req, data_flow_mode, desc, &idx);
        if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
                /* We must wait for DMA to finish writing the ciphertext */
                hw_desc_init(&desc[idx]);
                set_din_no_dma(&desc[idx], 0, 0xfffff0);
                set_dout_no_dma(&desc[idx], 0, 0, 1);
                idx++;
        }

        *seq_size = idx;
}

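/* Load the precomputed HMAC ipad digest and the initial digest length
 * into the hash engine.
 */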
static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[],
                             unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
                                DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
        unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
                                CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
        unsigned int idx = *seq_size;

        /* Loading hash ipad xor key state */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], hash_mode);
        set_din_type(&desc[idx], DMA_DLLI,
                     ctx->auth_state.hmac.ipad_opad_dma_addr, digest_size,
                     NS_BIT);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
        idx++;

        /* Load init. digest len (64 bytes) */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], hash_mode);
        set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
                     ctx->hash_len);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
        idx++;

        *seq_size = idx;
}

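/* Reset the MAC state and load the three precomputed XCBC-MAC subkeys
 * into the AES engine.
 */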
static void cc_set_xcbc_desc(struct aead_request *req, struct cc_hw_desc desc[],
                             unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        unsigned int idx = *seq_size;

        /* Loading MAC state */
        hw_desc_init(&desc[idx]);
        set_din_const(&desc[idx], 0, CC_AES_BLOCK_SIZE);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
        set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_aes_not_hash_mode(&desc[idx]);
        idx++;

        /* Setup XCBC MAC K1 */
        hw_desc_init(&desc[idx]);
        set_din_type(&desc[idx], DMA_DLLI,
                     ctx->auth_state.xcbc.xcbc_keys_dma_addr,
                     AES_KEYSIZE_128, NS_BIT);
        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
        set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_aes_not_hash_mode(&desc[idx]);
        idx++;

        /* Setup XCBC MAC K2 */
        hw_desc_init(&desc[idx]);
        set_din_type(&desc[idx], DMA_DLLI,
                     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
                      AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
        set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_aes_not_hash_mode(&desc[idx]);
        idx++;

        /* Setup XCBC MAC K3 */
        hw_desc_init(&desc[idx]);
        set_din_type(&desc[idx], DMA_DLLI,
                     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
                      2 * AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
        set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_aes_not_hash_mode(&desc[idx]);
        idx++;

        *seq_size = idx;
}

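/* Hash the associated data, if any, as the header pass. */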
static void cc_proc_header_desc(struct aead_request *req,
                                struct cc_hw_desc desc[],
                                unsigned int *seq_size)
{
        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
        unsigned int idx = *seq_size;

        /* Hash associated data */
        if (areq_ctx->assoclen > 0)
                cc_set_assoc_desc(req, DIN_HASH, desc, &idx);

        /* Hash IV */
        *seq_size = idx;
}

static void cc_proc_scheme_desc(struct aead_request *req,
                                struct cc_hw_desc desc[],
                                unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct cc_aead_handle *aead_handle = ctx->drvdata->aead_handle;
        unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
                                DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
        unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
                                CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
        unsigned int idx = *seq_size;

        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], hash_mode);
        set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
                      ctx->hash_len);
        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
        set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
        set_cipher_do(&desc[idx], DO_PAD);
        idx++;

        /* Get final ICV result */
        hw_desc_init(&desc[idx]);
        set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
                      digest_size);
        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
        set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
        set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
        set_cipher_mode(&desc[idx], hash_mode);
        idx++;

        /* Loading hash opad xor key state */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], hash_mode);
        set_din_type(&desc[idx], DMA_DLLI,
                     (ctx->auth_state.hmac.ipad_opad_dma_addr + digest_size),
                     digest_size, NS_BIT);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
        idx++;

        /* Load init. digest len (64 bytes) */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], hash_mode);
        set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
                     ctx->hash_len);
        set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
        idx++;

        /* Perform HASH update */
        hw_desc_init(&desc[idx]);
        set_din_sram(&desc[idx], aead_handle->sram_workspace_addr,
                     digest_size);
        set_flow_mode(&desc[idx], DIN_HASH);
        idx++;

        *seq_size = idx;
}

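/* If any buffer in the request is described by an MLLI table, copy that
 * table from host memory into its reserved SRAM slot before any data
 * descriptor references it.
 */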
static void cc_mlli_to_sram(struct aead_request *req,
                            struct cc_hw_desc desc[], unsigned int *seq_size)
{
        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        if ((req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
            req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
            !req_ctx->is_single_pass) && req_ctx->mlli_params.mlli_len) {
                dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
                        (unsigned int)ctx->drvdata->mlli_sram_addr,
                        req_ctx->mlli_params.mlli_len);
                /* Copy MLLI table host-to-sram */
                hw_desc_init(&desc[*seq_size]);
                set_din_type(&desc[*seq_size], DMA_DLLI,
                             req_ctx->mlli_params.mlli_dma_addr,
                             req_ctx->mlli_params.mlli_len, NS_BIT);
                set_dout_sram(&desc[*seq_size],
                              ctx->drvdata->mlli_sram_addr,
                              req_ctx->mlli_params.mlli_len);
                set_flow_mode(&desc[*seq_size], BYPASS);
                (*seq_size)++;
        }
}

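/* Select the engine routing for the data pass: single-pass flows route
 * the cipher data through the hash/MAC engine as well, while double-pass
 * flows use a plain DIN->DOUT transfer.
 */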
1225 static enum cc_flow_mode cc_get_data_flow(enum drv_crypto_direction direct,
1226                                           enum cc_flow_mode setup_flow_mode,
1227                                           bool is_single_pass)
1228 {
1229         enum cc_flow_mode data_flow_mode;
1230
1231         if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1232                 if (setup_flow_mode == S_DIN_to_AES)
1233                         data_flow_mode = is_single_pass ?
1234                                 AES_to_HASH_and_DOUT : DIN_AES_DOUT;
1235                 else
1236                         data_flow_mode = is_single_pass ?
1237                                 DES_to_HASH_and_DOUT : DIN_DES_DOUT;
1238         } else { /* Decrypt */
1239                 if (setup_flow_mode == S_DIN_to_AES)
1240                         data_flow_mode = is_single_pass ?
1241                                 AES_and_HASH : DIN_AES_DOUT;
1242                 else
1243                         data_flow_mode = is_single_pass ?
1244                                 DES_and_HASH : DIN_DES_DOUT;
1245         }
1246
1247         return data_flow_mode;
1248 }
1249
1250 static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
1251                             unsigned int *seq_size)
1252 {
1253         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1254         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1255         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1256         int direct = req_ctx->gen_ctx.op_type;
1257         unsigned int data_flow_mode =
1258                 cc_get_data_flow(direct, ctx->flow_mode,
1259                                  req_ctx->is_single_pass);
1260
1261         if (req_ctx->is_single_pass) {
1262                 /**
1263                  * Single-pass flow
1264                  */
1265                 cc_set_hmac_desc(req, desc, seq_size);
1266                 cc_set_cipher_desc(req, desc, seq_size);
1267                 cc_proc_header_desc(req, desc, seq_size);
1268                 cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
1269                 cc_proc_scheme_desc(req, desc, seq_size);
1270                 cc_proc_digest_desc(req, desc, seq_size);
1271                 return;
1272         }
1273
1274         /**
1275          * Double-pass flow
1276          * Fallback for unsupported single-pass modes,
1277          * i.e. using assoc. data of non-word-multiple
1278          */
1279         if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1280                 /* encrypt first.. */
1281                 cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1282                 /* authenc after..*/
1283                 cc_set_hmac_desc(req, desc, seq_size);
1284                 cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1285                 cc_proc_scheme_desc(req, desc, seq_size);
1286                 cc_proc_digest_desc(req, desc, seq_size);
1287
1288         } else { /*DECRYPT*/
1289                 /* authenc first.. */
1290                 cc_set_hmac_desc(req, desc, seq_size);
1291                 cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1292                 cc_proc_scheme_desc(req, desc, seq_size);
1293                 /* decrypt after.. */
1294                 cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1295         /* read the digest result, setting the completion bit;
1296          * this must come after the cipher operation
1297          */
1298                 cc_proc_digest_desc(req, desc, seq_size);
1299         }
1300 }
1301
1302 static void
1303 cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
1304                 unsigned int *seq_size)
1305 {
1306         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1307         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1308         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1309         int direct = req_ctx->gen_ctx.op_type;
1310         unsigned int data_flow_mode =
1311                 cc_get_data_flow(direct, ctx->flow_mode,
1312                                  req_ctx->is_single_pass);
1313
1314         if (req_ctx->is_single_pass) {
1315                 /*
1316                  * Single-pass flow
1317                  */
1318                 cc_set_xcbc_desc(req, desc, seq_size);
1319                 cc_set_cipher_desc(req, desc, seq_size);
1320                 cc_proc_header_desc(req, desc, seq_size);
1321                 cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
1322                 cc_proc_digest_desc(req, desc, seq_size);
1323                 return;
1324         }
1325
1326         /*
1327          * Double-pass flow
1328          * Fallback for unsupported single-pass modes,
1329          * i.e. when the associated data length is not word-aligned.
1330          */
1331         if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1332                 /* encrypt first.. */
1333                 cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1334                 /* authenc after.. */
1335                 cc_set_xcbc_desc(req, desc, seq_size);
1336                 cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1337                 cc_proc_digest_desc(req, desc, seq_size);
1338         } else { /*DECRYPT*/
1339                 /* authenc first.. */
1340                 cc_set_xcbc_desc(req, desc, seq_size);
1341                 cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1342                 /* decrypt after.. */
1343                 cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1344                 /* read the digest result, setting the completion bit;
1345                  * this must come after the cipher operation
1346                  */
1347                 cc_proc_digest_desc(req, desc, seq_size);
1348         }
1349 }
1350
1351 static int validate_data_size(struct cc_aead_ctx *ctx,
1352                               enum drv_crypto_direction direct,
1353                               struct aead_request *req)
1354 {
1355         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1356         struct device *dev = drvdata_to_dev(ctx->drvdata);
1357         unsigned int assoclen = areq_ctx->assoclen;
1358         unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
1359                         (req->cryptlen - ctx->authsize) : req->cryptlen;
1360
1361         if (direct == DRV_CRYPTO_DIRECTION_DECRYPT &&
1362             req->cryptlen < ctx->authsize)
1363                 goto data_size_err;
1364
1365         areq_ctx->is_single_pass = true; /* default to fast flow */
1366
1367         switch (ctx->flow_mode) {
1368         case S_DIN_to_AES:
1369                 if (ctx->cipher_mode == DRV_CIPHER_CBC &&
1370                     !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE))
1371                         goto data_size_err;
1372                 if (ctx->cipher_mode == DRV_CIPHER_CCM)
1373                         break;
1374                 if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
1375                         if (areq_ctx->plaintext_authenticate_only)
1376                                 areq_ctx->is_single_pass = false;
1377                         break;
1378                 }
1379
1380                 if (!IS_ALIGNED(assoclen, sizeof(u32)))
1381                         areq_ctx->is_single_pass = false;
1382
1383                 if (ctx->cipher_mode == DRV_CIPHER_CTR &&
1384                     !IS_ALIGNED(cipherlen, sizeof(u32)))
1385                         areq_ctx->is_single_pass = false;
1386
1387                 break;
1388         case S_DIN_to_DES:
1389                 if (!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE))
1390                         goto data_size_err;
1391                 if (!IS_ALIGNED(assoclen, DES_BLOCK_SIZE))
1392                         areq_ctx->is_single_pass = false;
1393                 break;
1394         default:
1395                 dev_err(dev, "Unexpected flow mode (%d)\n", ctx->flow_mode);
1396                 goto data_size_err;
1397         }
1398
1399         return 0;
1400
1401 data_size_err:
1402         return -EINVAL;
1403 }
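
/*
 * Illustrative examples (not part of the driver) of the single- vs.
 * double-pass decision made by validate_data_size() for AES flows that
 * are neither CCM nor GCM:
 *
 *   CBC, assoclen = 16, cipherlen a multiple of 16 -> single pass
 *   assoclen = 18 (not a multiple of 4)            -> double pass
 *   CTR, cipherlen = 10 (not a multiple of 4)      -> double pass
 */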
1404
1405 static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size)
1406 {
1407         unsigned int len = 0;
1408
1409         if (header_size == 0)
1410                 return 0;
1411
1412         if (header_size < ((1UL << 16) - (1UL << 8))) {
1413                 len = 2;
1414
1415                 pa0_buff[0] = (header_size >> 8) & 0xFF;
1416                 pa0_buff[1] = header_size & 0xFF;
1417         } else {
1418                 len = 6;
1419
1420                 pa0_buff[0] = 0xFF;
1421                 pa0_buff[1] = 0xFE;
1422                 pa0_buff[2] = (header_size >> 24) & 0xFF;
1423                 pa0_buff[3] = (header_size >> 16) & 0xFF;
1424                 pa0_buff[4] = (header_size >> 8) & 0xFF;
1425                 pa0_buff[5] = header_size & 0xFF;
1426         }
1427
1428         return len;
1429 }
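
/*
 * Illustrative sketch (not part of the driver): how format_ccm_a0()
 * encodes the associated-data length per RFC 3610 section 2.2. Lengths
 * below 0xff00 use a plain 2-byte big-endian field; larger 32-bit
 * lengths get the 0xff 0xfe marker followed by a 4-byte field.
 */
#if 0
static void a0_encoding_examples(void)
{
        u8 a0[6];

        /* 0x1234 bytes of AAD -> 0x12 0x34 */
        WARN_ON(format_ccm_a0(a0, 0x1234) != 2);
        WARN_ON(a0[0] != 0x12 || a0[1] != 0x34);

        /* 0x10000 bytes of AAD -> 0xff 0xfe 0x00 0x01 0x00 0x00 */
        WARN_ON(format_ccm_a0(a0, 0x10000) != 6);
        WARN_ON(a0[0] != 0xff || a0[1] != 0xfe || a0[3] != 0x01);
}
#endif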
1430
1431 static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
1432 {
1433         __be32 data;
1434
1435         memset(block, 0, csize);
1436         block += csize;
1437
1438         if (csize >= 4)
1439                 csize = 4;
1440         else if (msglen > (1 << (8 * csize)))
1441                 return -EOVERFLOW;
1442
1443         data = cpu_to_be32(msglen);
1444         memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
1445
1446         return 0;
1447 }
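
/*
 * Illustrative sketch (not part of the driver): writing a message length
 * of 0x0102 into an L = 3 byte field, as config_ccm_adata() does below
 * with set_msg_len(b0 + 16 - l, cryptlen, l). The field is big-endian
 * and zero-padded on the left.
 */
#if 0
static void msg_len_example(void)
{
        u8 field[3];

        WARN_ON(set_msg_len(field, 0x0102, sizeof(field)));
        WARN_ON(field[0] != 0x00 || field[1] != 0x01 || field[2] != 0x02);
}
#endif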
1448
1449 static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
1450                   unsigned int *seq_size)
1451 {
1452         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1453         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1454         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1455         unsigned int idx = *seq_size;
1456         unsigned int cipher_flow_mode;
1457         dma_addr_t mac_result;
1458
1459         if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1460                 cipher_flow_mode = AES_to_HASH_and_DOUT;
1461                 mac_result = req_ctx->mac_buf_dma_addr;
1462         } else { /* Encrypt */
1463                 cipher_flow_mode = AES_and_HASH;
1464                 mac_result = req_ctx->icv_dma_addr;
1465         }
1466
1467         /* load key */
1468         hw_desc_init(&desc[idx]);
1469         set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1470         set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1471                      ((ctx->enc_keylen == 24) ?  CC_AES_KEY_SIZE_MAX :
1472                       ctx->enc_keylen), NS_BIT);
1473         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1474         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1475         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1476         set_flow_mode(&desc[idx], S_DIN_to_AES);
1477         idx++;
1478
1479         /* load ctr state */
1480         hw_desc_init(&desc[idx]);
1481         set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1482         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1483         set_din_type(&desc[idx], DMA_DLLI,
1484                      req_ctx->gen_ctx.iv_dma_addr, AES_BLOCK_SIZE, NS_BIT);
1485         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1486         set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1487         set_flow_mode(&desc[idx], S_DIN_to_AES);
1488         idx++;
1489
1490         /* load MAC key */
1491         hw_desc_init(&desc[idx]);
1492         set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1493         set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1494                      ((ctx->enc_keylen == 24) ?  CC_AES_KEY_SIZE_MAX :
1495                       ctx->enc_keylen), NS_BIT);
1496         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1497         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1498         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1499         set_flow_mode(&desc[idx], S_DIN_to_HASH);
1500         set_aes_not_hash_mode(&desc[idx]);
1501         idx++;
1502
1503         /* load MAC state */
1504         hw_desc_init(&desc[idx]);
1505         set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1506         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1507         set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1508                      AES_BLOCK_SIZE, NS_BIT);
1509         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1510         set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1511         set_flow_mode(&desc[idx], S_DIN_to_HASH);
1512         set_aes_not_hash_mode(&desc[idx]);
1513         idx++;
1514
1515         /* process assoc data */
1516         if (req_ctx->assoclen > 0) {
1517                 cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
1518         } else {
1519                 hw_desc_init(&desc[idx]);
1520                 set_din_type(&desc[idx], DMA_DLLI,
1521                              sg_dma_address(&req_ctx->ccm_adata_sg),
1522                              AES_BLOCK_SIZE + req_ctx->ccm_hdr_size, NS_BIT);
1523                 set_flow_mode(&desc[idx], DIN_HASH);
1524                 idx++;
1525         }
1526
1527         /* process the cipher */
1528         if (req_ctx->cryptlen)
1529                 cc_proc_cipher_desc(req, cipher_flow_mode, desc, &idx);
1530
1531         /* Read the intermediate MAC */
1532         hw_desc_init(&desc[idx]);
1533         set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1534         set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, ctx->authsize,
1535                       NS_BIT, 0);
1536         set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1537         set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
1538         set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1539         set_aes_not_hash_mode(&desc[idx]);
1540         idx++;
1541
1542         /* load AES-CTR state (for last MAC calculation) */
1543         hw_desc_init(&desc[idx]);
1544         set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1545         set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1546         set_din_type(&desc[idx], DMA_DLLI, req_ctx->ccm_iv0_dma_addr,
1547                      AES_BLOCK_SIZE, NS_BIT);
1548         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1549         set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1550         set_flow_mode(&desc[idx], S_DIN_to_AES);
1551         idx++;
1552
1553         hw_desc_init(&desc[idx]);
1554         set_din_no_dma(&desc[idx], 0, 0xfffff0);
1555         set_dout_no_dma(&desc[idx], 0, 0, 1);
1556         idx++;
1557
1558         /* encrypt the "T" value and store MAC in mac_state */
1559         hw_desc_init(&desc[idx]);
1560         set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1561                      ctx->authsize, NS_BIT);
1562         set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1563         set_queue_last_ind(ctx->drvdata, &desc[idx]);
1564         set_flow_mode(&desc[idx], DIN_AES_DOUT);
1565         idx++;
1566
1567         *seq_size = idx;
1568         return 0;
1569 }
1570
1571 static int config_ccm_adata(struct aead_request *req)
1572 {
1573         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1574         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1575         struct device *dev = drvdata_to_dev(ctx->drvdata);
1576         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1577
1578         unsigned int lp = req->iv[0];
1579         /* Note: The code assumes that req->iv[0] already contains the
1580          * value of L' of RFC 3610.
1581          */
1582         unsigned int l = lp + 1;  /* This is L of RFC 3610 (L = L' + 1). */
1583         unsigned int m = ctx->authsize;  /* This is M of RFC 3610. */
1584         u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
1585         u8 *a0 = req_ctx->ccm_config + CCM_A0_OFFSET;
1586         u8 *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
1587         unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1588                                  DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1589                                 req->cryptlen :
1590                                 (req->cryptlen - ctx->authsize);
1591         int rc;
1592
1593         memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1594         memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE * 3);
1595
1596         /* taken from crypto/ccm.c */
1597         /* 2 <= L <= 8, so 1 <= L' <= 7. */
1598         if (l < 2 || l > 8) {
1599                 dev_err(dev, "illegal iv value %X\n", req->iv[0]);
1600                 return -EINVAL;
1601         }
1602         memcpy(b0, req->iv, AES_BLOCK_SIZE);
1603
1604         /* format control info per RFC 3610 and
1605          * NIST Special Publication 800-38C
1606          */
1607         *b0 |= (8 * ((m - 2) / 2));
1608         if (req_ctx->assoclen > 0)
1609                 *b0 |= 64;  /* Enable bit 6 if Adata exists. */
1610
1611         rc = set_msg_len(b0 + 16 - l, cryptlen, l);  /* Write l(m). */
1612         if (rc) {
1613                 dev_err(dev, "message len overflow detected\n");
1614                 return rc;
1615         }
1616          /* END of "taken from crypto/ccm.c" */
1617
1618         /* l(a) - size of associated data. */
1619         req_ctx->ccm_hdr_size = format_ccm_a0(a0, req_ctx->assoclen);
1620
1621         memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
1622         req->iv[15] = 1;
1623
1624         memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE);
1625         ctr_count_0[15] = 0;
1626
1627         return 0;
1628 }
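
/*
 * Illustrative sketch (not part of the driver): the B0 flags byte that
 * config_ccm_adata() assembles, per RFC 3610 / NIST SP 800-38C: Adata
 * in bit 6, M' = (M - 2) / 2 in bits 3..5 and L' = L - 1 in bits 0..2
 * (the L' bits arrive pre-set in req->iv[0]).
 */
#if 0
static u8 ccm_b0_flags(bool have_adata, unsigned int m, unsigned int l)
{
        return (have_adata ? 64 : 0) | (8 * ((m - 2) / 2)) | (l - 1);
}
/* e.g. an 8-byte ICV with AAD and a 4-byte length field:
 * ccm_b0_flags(true, 8, 4) == 0x5b
 */
#endif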
1629
1630 static void cc_proc_rfc4309_ccm(struct aead_request *req)
1631 {
1632         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1633         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1634         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1635
1636         /* L' */
1637         memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
1638         /* For RFC 4309, always use 4 bytes for message length
1639          * (at most 2^32-1 bytes).
1640          */
1641         areq_ctx->ctr_iv[0] = 3;
1642
1643         /* In RFC 4309 there is an 11-byte nonce+IV part
1644          * that we build here.
1645          */
1646         memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce,
1647                CCM_BLOCK_NONCE_SIZE);
1648         memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
1649                CCM_BLOCK_IV_SIZE);
1650         req->iv = areq_ctx->ctr_iv;
1651         areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE;
1652 }
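
/*
 * Illustrative layout (not part of the driver) of the counter block that
 * cc_proc_rfc4309_ccm() assembles, with L = 4 (L' = 3):
 *
 *   byte  0      : 3 (L')
 *   bytes 1 - 3  : salt from the key (ctx->ctr_nonce)
 *   bytes 4 - 11 : explicit IV from the request
 *   bytes 12 - 15: block counter, filled in by config_ccm_adata()
 */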
1653
1654 static void cc_set_ghash_desc(struct aead_request *req,
1655                               struct cc_hw_desc desc[], unsigned int *seq_size)
1656 {
1657         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1658         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1659         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1660         unsigned int idx = *seq_size;
1661
1662         /* load key into AES */
1663         hw_desc_init(&desc[idx]);
1664         set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1665         set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1666         set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1667                      ctx->enc_keylen, NS_BIT);
1668         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1669         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1670         set_flow_mode(&desc[idx], S_DIN_to_AES);
1671         idx++;
1672
1673         /* process one zero block to generate hkey */
1674         hw_desc_init(&desc[idx]);
1675         set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
1676         set_dout_dlli(&desc[idx], req_ctx->hkey_dma_addr, AES_BLOCK_SIZE,
1677                       NS_BIT, 0);
1678         set_flow_mode(&desc[idx], DIN_AES_DOUT);
1679         idx++;
1680
1681         /* Memory Barrier */
1682         hw_desc_init(&desc[idx]);
1683         set_din_no_dma(&desc[idx], 0, 0xfffff0);
1684         set_dout_no_dma(&desc[idx], 0, 0, 1);
1685         idx++;
1686
1687         /* Load GHASH subkey */
1688         hw_desc_init(&desc[idx]);
1689         set_din_type(&desc[idx], DMA_DLLI, req_ctx->hkey_dma_addr,
1690                      AES_BLOCK_SIZE, NS_BIT);
1691         set_dout_no_dma(&desc[idx], 0, 0, 1);
1692         set_flow_mode(&desc[idx], S_DIN_to_HASH);
1693         set_aes_not_hash_mode(&desc[idx]);
1694         set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1695         set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1696         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1697         idx++;
1698
1699         /* Configure the hash engine to work with GHASH.
1700          * Since it was not possible to extend the HASH submodes to add
1701          * GHASH, the following command is necessary in order to select
1702          * GHASH (according to the HW designers).
1703          */
1704         hw_desc_init(&desc[idx]);
1705         set_din_no_dma(&desc[idx], 0, 0xfffff0);
1706         set_dout_no_dma(&desc[idx], 0, 0, 1);
1707         set_flow_mode(&desc[idx], S_DIN_to_HASH);
1708         set_aes_not_hash_mode(&desc[idx]);
1709         set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1710         set_cipher_do(&desc[idx], 1); /* 1 = AES_SK RKEK */
1711         set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1712         set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1713         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1714         idx++;
1715
1716         /* Load the GHASH initial state (which is 0); every hash has an
1717          * initial state.
1718          */
1719         hw_desc_init(&desc[idx]);
1720         set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
1721         set_dout_no_dma(&desc[idx], 0, 0, 1);
1722         set_flow_mode(&desc[idx], S_DIN_to_HASH);
1723         set_aes_not_hash_mode(&desc[idx]);
1724         set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1725         set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1726         set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1727         idx++;
1728
1729         *seq_size = idx;
1730 }
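
/*
 * Note (illustrative): the zero-block encryption in cc_set_ghash_desc()
 * above computes the GHASH subkey H = AES-K(0^128), as defined in NIST
 * SP 800-38D; H is then loaded into the hash engine as the GHASH key.
 */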
1731
1732 static void cc_set_gctr_desc(struct aead_request *req, struct cc_hw_desc desc[],
1733                              unsigned int *seq_size)
1734 {
1735         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1736         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1737         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1738         unsigned int idx = *seq_size;
1739
1740         /* load key into AES */
1741         hw_desc_init(&desc[idx]);
1742         set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1743         set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1744         set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1745                      ctx->enc_keylen, NS_BIT);
1746         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1747         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1748         set_flow_mode(&desc[idx], S_DIN_to_AES);
1749         idx++;
1750
1751         if (req_ctx->cryptlen && !req_ctx->plaintext_authenticate_only) {
1752                 /* load initial AES/CTR counter value, incremented by 2 */
1753                 hw_desc_init(&desc[idx]);
1754                 set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1755                 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1756                 set_din_type(&desc[idx], DMA_DLLI,
1757                              req_ctx->gcm_iv_inc2_dma_addr, AES_BLOCK_SIZE,
1758                              NS_BIT);
1759                 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1760                 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1761                 set_flow_mode(&desc[idx], S_DIN_to_AES);
1762                 idx++;
1763         }
1764
1765         *seq_size = idx;
1766 }
1767
1768 static void cc_proc_gcm_result(struct aead_request *req,
1769                                struct cc_hw_desc desc[],
1770                                unsigned int *seq_size)
1771 {
1772         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1773         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1774         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1775         dma_addr_t mac_result;
1776         unsigned int idx = *seq_size;
1777
1778         if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1779                 mac_result = req_ctx->mac_buf_dma_addr;
1780         } else { /* Encrypt */
1781                 mac_result = req_ctx->icv_dma_addr;
1782         }
1783
1784         /* process(ghash) gcm_block_len */
1785         hw_desc_init(&desc[idx]);
1786         set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_block_len_dma_addr,
1787                      AES_BLOCK_SIZE, NS_BIT);
1788         set_flow_mode(&desc[idx], DIN_HASH);
1789         idx++;
1790
1791         /* Store GHASH state after GHASH(AAD + ciphertext + length block) */
1792         hw_desc_init(&desc[idx]);
1793         set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1794         set_din_no_dma(&desc[idx], 0, 0xfffff0);
1795         set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, AES_BLOCK_SIZE,
1796                       NS_BIT, 0);
1797         set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1798         set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1799         set_aes_not_hash_mode(&desc[idx]);
1800
1801         idx++;
1802
1803         /* load initial AES/CTR counter value, incremented by 1 */
1804         hw_desc_init(&desc[idx]);
1805         set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1806         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1807         set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_iv_inc1_dma_addr,
1808                      AES_BLOCK_SIZE, NS_BIT);
1809         set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1810         set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1811         set_flow_mode(&desc[idx], S_DIN_to_AES);
1812         idx++;
1813
1814         /* Memory Barrier */
1815         hw_desc_init(&desc[idx]);
1816         set_din_no_dma(&desc[idx], 0, 0xfffff0);
1817         set_dout_no_dma(&desc[idx], 0, 0, 1);
1818         idx++;
1819
1820         /* process GCTR on stored GHASH and store MAC in mac_state */
1821         hw_desc_init(&desc[idx]);
1822         set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1823         set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1824                      AES_BLOCK_SIZE, NS_BIT);
1825         set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1826         set_queue_last_ind(ctx->drvdata, &desc[idx]);
1827         set_flow_mode(&desc[idx], DIN_AES_DOUT);
1828         idx++;
1829
1830         *seq_size = idx;
1831 }
1832
1833 static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
1834                   unsigned int *seq_size)
1835 {
1836         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1837         unsigned int cipher_flow_mode;
1838
1839         if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1840                 cipher_flow_mode = AES_and_HASH;
1841         } else { /* Encrypt */
1842                 cipher_flow_mode = AES_to_HASH_and_DOUT;
1843         }
1844
1845         /* In RFC 4543 there is no data to encrypt; just copy src to dst. */
1846         if (req_ctx->plaintext_authenticate_only) {
1847                 cc_proc_cipher_desc(req, BYPASS, desc, seq_size);
1848                 cc_set_ghash_desc(req, desc, seq_size);
1849                 /* process(ghash) assoc data */
1850                 cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
1851                 cc_set_gctr_desc(req, desc, seq_size);
1852                 cc_proc_gcm_result(req, desc, seq_size);
1853                 return 0;
1854         }
1855
1856         /* for GCM and RFC 4106 */
1857         cc_set_ghash_desc(req, desc, seq_size);
1858         /* process(ghash) assoc data */
1859         if (req_ctx->assoclen > 0)
1860                 cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
1861         cc_set_gctr_desc(req, desc, seq_size);
1862         /* process(gctr+ghash) */
1863         if (req_ctx->cryptlen)
1864                 cc_proc_cipher_desc(req, cipher_flow_mode, desc, seq_size);
1865         cc_proc_gcm_result(req, desc, seq_size);
1866
1867         return 0;
1868 }
1869
1870 static int config_gcm_context(struct aead_request *req)
1871 {
1872         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1873         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1874         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1875         struct device *dev = drvdata_to_dev(ctx->drvdata);
1876
1877         unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1878                                  DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1879                                 req->cryptlen :
1880                                 (req->cryptlen - ctx->authsize);
1881         __be32 counter = cpu_to_be32(2);
1882
1883         dev_dbg(dev, "%s() cryptlen = %d, req_ctx->assoclen = %d ctx->authsize = %d\n",
1884                 __func__, cryptlen, req_ctx->assoclen, ctx->authsize);
1885
1886         memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
1887
1888         memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1889
1890         memcpy(req->iv + 12, &counter, 4);
1891         memcpy(req_ctx->gcm_iv_inc2, req->iv, 16);
1892
1893         counter = cpu_to_be32(1);
1894         memcpy(req->iv + 12, &counter, 4);
1895         memcpy(req_ctx->gcm_iv_inc1, req->iv, 16);
1896
1897         if (!req_ctx->plaintext_authenticate_only) {
1898                 __be64 temp64;
1899
1900                 temp64 = cpu_to_be64(req_ctx->assoclen * 8);
1901                 memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1902                 temp64 = cpu_to_be64(cryptlen * 8);
1903                 memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
1904         } else {
1905                 /* RFC 4543 => all data (AAD, IV, plaintext) is considered
1906                  * additional data; i.e. nothing is encrypted.
1907                  */
1908                 __be64 temp64;
1909
1910                 temp64 = cpu_to_be64((req_ctx->assoclen +
1911                                       GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
1912                 memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1913                 temp64 = 0;
1914                 memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
1915         }
1916
1917         return 0;
1918 }
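
/*
 * Illustrative sketch (not part of the driver): the GHASH length block
 * prepared by config_gcm_context() is len(A) || len(C), both 64-bit
 * big-endian *bit* counts (NIST SP 800-38D). For RFC 4543, len(A)
 * covers AAD + IV + plaintext and len(C) is zero, since nothing is
 * encrypted.
 */
#if 0
static void gcm_len_block_example(struct aead_req_ctx *req_ctx)
{
        /* e.g. 20 bytes of AAD and 16 bytes of ciphertext: */
        __be64 len_a = cpu_to_be64(20 * 8); /* 0x00000000000000a0 */
        __be64 len_c = cpu_to_be64(16 * 8); /* 0x0000000000000080 */

        memcpy(&req_ctx->gcm_len_block.len_a, &len_a, sizeof(len_a));
        memcpy(&req_ctx->gcm_len_block.len_c, &len_c, sizeof(len_c));
}
#endif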
1919
1920 static void cc_proc_rfc4_gcm(struct aead_request *req)
1921 {
1922         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1923         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1924         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1925
1926         memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET,
1927                ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
1928         memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
1929                GCM_BLOCK_RFC4_IV_SIZE);
1930         req->iv = areq_ctx->ctr_iv;
1931         areq_ctx->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
1932 }
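
/*
 * Illustrative layout (not part of the driver) of the 12-byte GCM nonce
 * assembled by cc_proc_rfc4_gcm(), per RFC 4106 section 4:
 *
 *   bytes 0 - 3 : salt from the key (ctx->ctr_nonce)
 *   bytes 4 - 11: explicit IV from the request
 */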
1933
1934 static int cc_proc_aead(struct aead_request *req,
1935                         enum drv_crypto_direction direct)
1936 {
1937         int rc = 0;
1938         int seq_len = 0;
1939         struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ];
1940         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1941         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1942         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1943         struct device *dev = drvdata_to_dev(ctx->drvdata);
1944         struct cc_crypto_req cc_req = {};
1945
1946         dev_dbg(dev, "%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptlen=%d\n",
1947                 ((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Enc" : "Dec"),
1948                 ctx, req, req->iv, sg_virt(req->src), req->src->offset,
1949                 sg_virt(req->dst), req->dst->offset, req->cryptlen);
1950
1951         /* STAT_PHASE_0: Init and sanity checks */
1952
1953         /* Check data length according to mode */
1954         if (validate_data_size(ctx, direct, req)) {
1955                 dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
1956                         req->cryptlen, areq_ctx->assoclen);
1957                 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
1958                 return -EINVAL;
1959         }
1960
1961         /* Setup request structure */
1962         cc_req.user_cb = (void *)cc_aead_complete;
1963         cc_req.user_arg = (void *)req;
1964
1965         /* Setup request context */
1966         areq_ctx->gen_ctx.op_type = direct;
1967         areq_ctx->req_authsize = ctx->authsize;
1968         areq_ctx->cipher_mode = ctx->cipher_mode;
1969
1970         /* STAT_PHASE_1: Map buffers */
1971
1972         if (ctx->cipher_mode == DRV_CIPHER_CTR) {
1973                 /* Build CTR IV - Copy nonce from last 4 bytes in
1974                  * CTR key to first 4 bytes in CTR IV
1975                  */
1976                 memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce,
1977                        CTR_RFC3686_NONCE_SIZE);
1978                 if (!areq_ctx->backup_giv) /* user-supplied, not generated, IV */
1979                         memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE,
1980                                req->iv, CTR_RFC3686_IV_SIZE);
1981                 /* Initialize counter portion of counter block */
1982                 *(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
1983                             CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1984
1985                 /* Replace with counter iv */
1986                 req->iv = areq_ctx->ctr_iv;
1987                 areq_ctx->hw_iv_size = CTR_RFC3686_BLOCK_SIZE;
1988         } else if ((ctx->cipher_mode == DRV_CIPHER_CCM) ||
1989                    (ctx->cipher_mode == DRV_CIPHER_GCTR)) {
1990                 areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
1991                 if (areq_ctx->ctr_iv != req->iv) {
1992                         memcpy(areq_ctx->ctr_iv, req->iv,
1993                                crypto_aead_ivsize(tfm));
1994                         req->iv = areq_ctx->ctr_iv;
1995                 }
1996         } else {
1997                 areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm);
1998         }
1999
2000         if (ctx->cipher_mode == DRV_CIPHER_CCM) {
2001                 rc = config_ccm_adata(req);
2002                 if (rc) {
2003                         dev_dbg(dev, "config_ccm_adata() returned with a failure %d!\n",
2004                                 rc);
2005                         goto exit;
2006                 }
2007         } else {
2008                 areq_ctx->ccm_hdr_size = ccm_header_size_null;
2009         }
2010
2011         if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
2012                 rc = config_gcm_context(req);
2013                 if (rc) {
2014                         dev_dbg(dev, "config_gcm_context() returned with a failure %d!\n",
2015                                 rc);
2016                         goto exit;
2017                 }
2018         }
2019
2020         rc = cc_map_aead_request(ctx->drvdata, req);
2021         if (rc) {
2022                 dev_err(dev, "map_request() failed\n");
2023                 goto exit;
2024         }
2025
2026         /* do we need to generate IV? */
2027         if (areq_ctx->backup_giv) {
2028                 /* set the DMA-mapped IV address */
2029                 if (ctx->cipher_mode == DRV_CIPHER_CTR) {
2030                         cc_req.ivgen_dma_addr[0] =
2031                                 areq_ctx->gen_ctx.iv_dma_addr +
2032                                 CTR_RFC3686_NONCE_SIZE;
2033                         cc_req.ivgen_dma_addr_len = 1;
2034                 } else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
2035                         /* In CCM, the IV needs to exist both inside B0 and
2036                          * inside the counter. It is also copied to
2037                          * iv_dma_addr for other reasons (like returning it
2038                          * to the user). So, use 3 (identical) IV outputs.
2039                          */
2040                         cc_req.ivgen_dma_addr[0] =
2041                                 areq_ctx->gen_ctx.iv_dma_addr +
2042                                 CCM_BLOCK_IV_OFFSET;
2043                         cc_req.ivgen_dma_addr[1] =
2044                                 sg_dma_address(&areq_ctx->ccm_adata_sg) +
2045                                 CCM_B0_OFFSET + CCM_BLOCK_IV_OFFSET;
2046                         cc_req.ivgen_dma_addr[2] =
2047                                 sg_dma_address(&areq_ctx->ccm_adata_sg) +
2048                                 CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET;
2049                         cc_req.ivgen_dma_addr_len = 3;
2050                 } else {
2051                         cc_req.ivgen_dma_addr[0] =
2052                                 areq_ctx->gen_ctx.iv_dma_addr;
2053                         cc_req.ivgen_dma_addr_len = 1;
2054                 }
2055
2056                 /* set the IV size (8/16 bytes long) */
2057                 cc_req.ivgen_size = crypto_aead_ivsize(tfm);
2058         }
2059
2060         /* STAT_PHASE_2: Create sequence */
2061
2062         /* Load MLLI tables to SRAM if necessary */
2063         cc_mlli_to_sram(req, desc, &seq_len);
2064
2065         /* TODO: move seq len by reference */
2066         switch (ctx->auth_mode) {
2067         case DRV_HASH_SHA1:
2068         case DRV_HASH_SHA256:
2069                 cc_hmac_authenc(req, desc, &seq_len);
2070                 break;
2071         case DRV_HASH_XCBC_MAC:
2072                 cc_xcbc_authenc(req, desc, &seq_len);
2073                 break;
2074         case DRV_HASH_NULL:
2075                 if (ctx->cipher_mode == DRV_CIPHER_CCM)
2076                         cc_ccm(req, desc, &seq_len);
2077                 if (ctx->cipher_mode == DRV_CIPHER_GCTR)
2078                         cc_gcm(req, desc, &seq_len);
2079                 break;
2080         default:
2081                 dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
2082                 cc_unmap_aead_request(dev, req);
2083                 rc = -ENOTSUPP;
2084                 goto exit;
2085         }
2086
2087         /* STAT_PHASE_3: Lock HW and push sequence */
2088
2089         rc = cc_send_request(ctx->drvdata, &cc_req, desc, seq_len, &req->base);
2090
2091         if (rc != -EINPROGRESS && rc != -EBUSY) {
2092                 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
2093                 cc_unmap_aead_request(dev, req);
2094         }
2095
2096 exit:
2097         return rc;
2098 }
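
/*
 * Illustrative layout (not part of the driver) of the RFC 3686 counter
 * block built in cc_proc_aead() above for CTR mode:
 *
 *   bytes 0 - 3  : nonce from the key (ctx->ctr_nonce)
 *   bytes 4 - 11 : IV from the request
 *   bytes 12 - 15: big-endian block counter, initialized to 1
 */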
2099
2100 static int cc_aead_encrypt(struct aead_request *req)
2101 {
2102         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2103         int rc;
2104
2105         memset(areq_ctx, 0, sizeof(*areq_ctx));
2106
2107         /* No generated IV required */
2108         areq_ctx->backup_iv = req->iv;
2109         areq_ctx->assoclen = req->assoclen;
2110         areq_ctx->backup_giv = NULL;
2111         areq_ctx->is_gcm4543 = false;
2112
2113         areq_ctx->plaintext_authenticate_only = false;
2114
2115         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2116         if (rc != -EINPROGRESS && rc != -EBUSY)
2117                 req->iv = areq_ctx->backup_iv;
2118
2119         return rc;
2120 }
2121
2122 static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
2123 {
2124         /* Very similar to cc_aead_encrypt() above. */
2125
2126         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2127         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2128         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2129         struct device *dev = drvdata_to_dev(ctx->drvdata);
2130         int rc = -EINVAL;
2131
2132         if (!valid_assoclen(req)) {
2133                 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2134                 goto out;
2135         }
2136
2137         memset(areq_ctx, 0, sizeof(*areq_ctx));
2138
2139         /* No generated IV required */
2140         areq_ctx->backup_iv = req->iv;
2141         areq_ctx->assoclen = req->assoclen;
2142         areq_ctx->backup_giv = NULL;
2143         areq_ctx->is_gcm4543 = true;
2144
2145         cc_proc_rfc4309_ccm(req);
2146
2147         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2148         if (rc != -EINPROGRESS && rc != -EBUSY)
2149                 req->iv = areq_ctx->backup_iv;
2150 out:
2151         return rc;
2152 }
2153
2154 static int cc_aead_decrypt(struct aead_request *req)
2155 {
2156         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2157         int rc;
2158
2159         memset(areq_ctx, 0, sizeof(*areq_ctx));
2160
2161         /* No generated IV required */
2162         areq_ctx->backup_iv = req->iv;
2163         areq_ctx->assoclen = req->assoclen;
2164         areq_ctx->backup_giv = NULL;
2165         areq_ctx->is_gcm4543 = false;
2166
2167         areq_ctx->plaintext_authenticate_only = false;
2168
2169         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2170         if (rc != -EINPROGRESS && rc != -EBUSY)
2171                 req->iv = areq_ctx->backup_iv;
2172
2173         return rc;
2174 }
2175
2176 static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
2177 {
2178         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2179         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2180         struct device *dev = drvdata_to_dev(ctx->drvdata);
2181         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2182         int rc = -EINVAL;
2183
2184         if (!valid_assoclen(req)) {
2185                 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2186                 goto out;
2187         }
2188
2189         memset(areq_ctx, 0, sizeof(*areq_ctx));
2190
2191         /* No generated IV required */
2192         areq_ctx->backup_iv = req->iv;
2193         areq_ctx->assoclen = req->assoclen;
2194         areq_ctx->backup_giv = NULL;
2195
2196         areq_ctx->is_gcm4543 = true;
2197         cc_proc_rfc4309_ccm(req);
2198
2199         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2200         if (rc != -EINPROGRESS && rc != -EBUSY)
2201                 req->iv = areq_ctx->backup_iv;
2202
2203 out:
2204         return rc;
2205 }
2206
2207 static int cc_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
2208                                  unsigned int keylen)
2209 {
2210         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2211         struct device *dev = drvdata_to_dev(ctx->drvdata);
2212
2213         dev_dbg(dev, "%s()  keylen %d, key %p\n", __func__, keylen, key);
2214
2215         if (keylen < 4)
2216                 return -EINVAL;
2217
2218         keylen -= 4;
2219         memcpy(ctx->ctr_nonce, key + keylen, 4);
2220
2221         return cc_aead_setkey(tfm, key, keylen);
2222 }
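
/*
 * Illustrative sketch (not part of the driver): an RFC 4106 key blob is
 * the raw AES key followed by the 4-byte salt, so e.g. a 20-byte
 * setkey() carries an AES-128 key plus the salt that is stored in
 * ctx->ctr_nonce above.
 */
#if 0
        /* e.g. AES-128: 16 key bytes followed by the 4 salt bytes */
        static const u8 key20[20] = { 0 };

        crypto_aead_setkey(tfm, key20, sizeof(key20));
#endif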
2223
2224 static int cc_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
2225                                  unsigned int keylen)
2226 {
2227         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2228         struct device *dev = drvdata_to_dev(ctx->drvdata);
2229
2230         dev_dbg(dev, "%s()  keylen %d, key %p\n", __func__, keylen, key);
2231
2232         if (keylen < 4)
2233                 return -EINVAL;
2234
2235         keylen -= 4;
2236         memcpy(ctx->ctr_nonce, key + keylen, 4);
2237
2238         return cc_aead_setkey(tfm, key, keylen);
2239 }
2240
2241 static int cc_gcm_setauthsize(struct crypto_aead *authenc,
2242                               unsigned int authsize)
2243 {
2244         switch (authsize) {
2245         case 4:
2246         case 8:
2247         case 12:
2248         case 13:
2249         case 14:
2250         case 15:
2251         case 16:
2252                 break;
2253         default:
2254                 return -EINVAL;
2255         }
2256
2257         return cc_aead_setauthsize(authenc, authsize);
2258 }
2259
2260 static int cc_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
2261                                       unsigned int authsize)
2262 {
2263         struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
2264         struct device *dev = drvdata_to_dev(ctx->drvdata);
2265
2266         dev_dbg(dev, "authsize %d\n", authsize);
2267
2268         switch (authsize) {
2269         case 8:
2270         case 12:
2271         case 16:
2272                 break;
2273         default:
2274                 return -EINVAL;
2275         }
2276
2277         return cc_aead_setauthsize(authenc, authsize);
2278 }
2279
2280 static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
2281                                       unsigned int authsize)
2282 {
2283         struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
2284         struct device *dev = drvdata_to_dev(ctx->drvdata);
2285
2286         dev_dbg(dev, "authsize %d\n", authsize);
2287
2288         if (authsize != 16)
2289                 return -EINVAL;
2290
2291         return cc_aead_setauthsize(authenc, authsize);
2292 }
2293
2294 static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
2295 {
2296         /* Very similar to cc_aead_encrypt() above. */
2297
2298         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2299         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2300         struct device *dev = drvdata_to_dev(ctx->drvdata);
2301         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2302         int rc = -EINVAL;
2303
2304         if (!valid_assoclen(req)) {
2305                 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2306                 goto out;
2307         }
2308
2309         memset(areq_ctx, 0, sizeof(*areq_ctx));
2310
2311         /* No generated IV required */
2312         areq_ctx->backup_iv = req->iv;
2313         areq_ctx->assoclen = req->assoclen;
2314         areq_ctx->backup_giv = NULL;
2315
2316         areq_ctx->plaintext_authenticate_only = false;
2317
2318         cc_proc_rfc4_gcm(req);
2319         areq_ctx->is_gcm4543 = true;
2320
2321         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2322         if (rc != -EINPROGRESS && rc != -EBUSY)
2323                 req->iv = areq_ctx->backup_iv;
2324 out:
2325         return rc;
2326 }
2327
2328 static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
2329 {
2330         /* Very similar to cc_aead_encrypt() above. */
2331
2332         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2333         int rc;
2334
2335         memset(areq_ctx, 0, sizeof(*areq_ctx));
2336
2337         /* plaintext is not encrypted with RFC 4543 */
2338         areq_ctx->plaintext_authenticate_only = true;
2339
2340         /* No generated IV required */
2341         areq_ctx->backup_iv = req->iv;
2342         areq_ctx->assoclen = req->assoclen;
2343         areq_ctx->backup_giv = NULL;
2344
2345         cc_proc_rfc4_gcm(req);
2346         areq_ctx->is_gcm4543 = true;
2347
2348         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2349         if (rc != -EINPROGRESS && rc != -EBUSY)
2350                 req->iv = areq_ctx->backup_iv;
2351
2352         return rc;
2353 }
2354
2355 static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
2356 {
2357         /* Very similar to cc_aead_decrypt() above. */
2358
2359         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2360         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2361         struct device *dev = drvdata_to_dev(ctx->drvdata);
2362         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2363         int rc = -EINVAL;
2364
2365         if (!valid_assoclen(req)) {
2366                 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2367                 goto out;
2368         }
2369
2370         memset(areq_ctx, 0, sizeof(*areq_ctx));
2371
2372         /* No generated IV required */
2373         areq_ctx->backup_iv = req->iv;
2374         areq_ctx->assoclen = req->assoclen;
2375         areq_ctx->backup_giv = NULL;
2376
2377         areq_ctx->plaintext_authenticate_only = false;
2378
2379         cc_proc_rfc4_gcm(req);
2380         areq_ctx->is_gcm4543 = true;
2381
2382         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2383         if (rc != -EINPROGRESS && rc != -EBUSY)
2384                 req->iv = areq_ctx->backup_iv;
2385 out:
2386         return rc;
2387 }
2388
2389 static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
2390 {
2391         /* Very similar to cc_aead_decrypt() above. */
2392
2393         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2394         int rc;
2395
2396         memset(areq_ctx, 0, sizeof(*areq_ctx));
2397
2398         /* plaintext is not decrypted with RFC 4543 */
2399         areq_ctx->plaintext_authenticate_only = true;
2400
2401         /* No generated IV required */
2402         areq_ctx->backup_iv = req->iv;
2403         areq_ctx->assoclen = req->assoclen;
2404         areq_ctx->backup_giv = NULL;
2405
2406         cc_proc_rfc4_gcm(req);
2407         areq_ctx->is_gcm4543 = true;
2408
2409         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2410         if (rc != -EINPROGRESS && rc != -EBUSY)
2411                 req->iv = areq_ctx->backup_iv;
2412
2413         return rc;
2414 }
2415
2416 /* aead alg */
2417 static struct cc_alg_template aead_algs[] = {
2418         {
2419                 .name = "authenc(hmac(sha1),cbc(aes))",
2420                 .driver_name = "authenc-hmac-sha1-cbc-aes-ccree",
2421                 .blocksize = AES_BLOCK_SIZE,
2422                 .template_aead = {
2423                         .setkey = cc_aead_setkey,
2424                         .setauthsize = cc_aead_setauthsize,
2425                         .encrypt = cc_aead_encrypt,
2426                         .decrypt = cc_aead_decrypt,
2427                         .init = cc_aead_init,
2428                         .exit = cc_aead_exit,
2429                         .ivsize = AES_BLOCK_SIZE,
2430                         .maxauthsize = SHA1_DIGEST_SIZE,
2431                 },
2432                 .cipher_mode = DRV_CIPHER_CBC,
2433                 .flow_mode = S_DIN_to_AES,
2434                 .auth_mode = DRV_HASH_SHA1,
2435                 .min_hw_rev = CC_HW_REV_630,
2436                 .std_body = CC_STD_NIST,
2437         },
2438         {
2439                 .name = "authenc(hmac(sha1),cbc(des3_ede))",
2440                 .driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
2441                 .blocksize = DES3_EDE_BLOCK_SIZE,
2442                 .template_aead = {
2443                         .setkey = cc_des3_aead_setkey,
2444                         .setauthsize = cc_aead_setauthsize,
2445                         .encrypt = cc_aead_encrypt,
2446                         .decrypt = cc_aead_decrypt,
2447                         .init = cc_aead_init,
2448                         .exit = cc_aead_exit,
2449                         .ivsize = DES3_EDE_BLOCK_SIZE,
2450                         .maxauthsize = SHA1_DIGEST_SIZE,
2451                 },
2452                 .cipher_mode = DRV_CIPHER_CBC,
2453                 .flow_mode = S_DIN_to_DES,
2454                 .auth_mode = DRV_HASH_SHA1,
2455                 .min_hw_rev = CC_HW_REV_630,
2456                 .std_body = CC_STD_NIST,
2457         },
2458         {
2459                 .name = "authenc(hmac(sha256),cbc(aes))",
2460                 .driver_name = "authenc-hmac-sha256-cbc-aes-ccree",
2461                 .blocksize = AES_BLOCK_SIZE,
2462                 .template_aead = {
2463                         .setkey = cc_aead_setkey,
2464                         .setauthsize = cc_aead_setauthsize,
2465                         .encrypt = cc_aead_encrypt,
2466                         .decrypt = cc_aead_decrypt,
2467                         .init = cc_aead_init,
2468                         .exit = cc_aead_exit,
2469                         .ivsize = AES_BLOCK_SIZE,
2470                         .maxauthsize = SHA256_DIGEST_SIZE,
2471                 },
2472                 .cipher_mode = DRV_CIPHER_CBC,
2473                 .flow_mode = S_DIN_to_AES,
2474                 .auth_mode = DRV_HASH_SHA256,
2475                 .min_hw_rev = CC_HW_REV_630,
2476                 .std_body = CC_STD_NIST,
2477         },
2478         {
2479                 .name = "authenc(hmac(sha256),cbc(des3_ede))",
2480                 .driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
2481                 .blocksize = DES3_EDE_BLOCK_SIZE,
2482                 .template_aead = {
2483                         .setkey = cc_des3_aead_setkey,
2484                         .setauthsize = cc_aead_setauthsize,
2485                         .encrypt = cc_aead_encrypt,
2486                         .decrypt = cc_aead_decrypt,
2487                         .init = cc_aead_init,
2488                         .exit = cc_aead_exit,
2489                         .ivsize = DES3_EDE_BLOCK_SIZE,
2490                         .maxauthsize = SHA256_DIGEST_SIZE,
2491                 },
2492                 .cipher_mode = DRV_CIPHER_CBC,
2493                 .flow_mode = S_DIN_to_DES,
2494                 .auth_mode = DRV_HASH_SHA256,
2495                 .min_hw_rev = CC_HW_REV_630,
2496                 .std_body = CC_STD_NIST,
2497         },
2498         {
2499                 .name = "authenc(xcbc(aes),cbc(aes))",
2500                 .driver_name = "authenc-xcbc-aes-cbc-aes-ccree",
2501                 .blocksize = AES_BLOCK_SIZE,
2502                 .template_aead = {
2503                         .setkey = cc_aead_setkey,
2504                         .setauthsize = cc_aead_setauthsize,
2505                         .encrypt = cc_aead_encrypt,
2506                         .decrypt = cc_aead_decrypt,
2507                         .init = cc_aead_init,
2508                         .exit = cc_aead_exit,
2509                         .ivsize = AES_BLOCK_SIZE,
2510                         .maxauthsize = AES_BLOCK_SIZE,
2511                 },
2512                 .cipher_mode = DRV_CIPHER_CBC,
2513                 .flow_mode = S_DIN_to_AES,
2514                 .auth_mode = DRV_HASH_XCBC_MAC,
2515                 .min_hw_rev = CC_HW_REV_630,
2516                 .std_body = CC_STD_NIST,
2517         },
2518         {
2519                 .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
2520                 .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-ccree",
2521                 .blocksize = 1,
2522                 .template_aead = {
2523                         .setkey = cc_aead_setkey,
2524                         .setauthsize = cc_aead_setauthsize,
2525                         .encrypt = cc_aead_encrypt,
2526                         .decrypt = cc_aead_decrypt,
2527                         .init = cc_aead_init,
2528                         .exit = cc_aead_exit,
2529                         .ivsize = CTR_RFC3686_IV_SIZE,
2530                         .maxauthsize = SHA1_DIGEST_SIZE,
2531                 },
2532                 .cipher_mode = DRV_CIPHER_CTR,
2533                 .flow_mode = S_DIN_to_AES,
2534                 .auth_mode = DRV_HASH_SHA1,
2535                 .min_hw_rev = CC_HW_REV_630,
2536                 .std_body = CC_STD_NIST,
2537         },
2538         {
2539                 .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
2540                 .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-ccree",
2541                 .blocksize = 1,
2542                 .template_aead = {
2543                         .setkey = cc_aead_setkey,
2544                         .setauthsize = cc_aead_setauthsize,
2545                         .encrypt = cc_aead_encrypt,
2546                         .decrypt = cc_aead_decrypt,
2547                         .init = cc_aead_init,
2548                         .exit = cc_aead_exit,
2549                         .ivsize = CTR_RFC3686_IV_SIZE,
2550                         .maxauthsize = SHA256_DIGEST_SIZE,
2551                 },
		.cipher_mode = DRV_CIPHER_CTR,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_SHA256,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
		.driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-ccree",
		.blocksize = 1,
		.template_aead = {
			.setkey = cc_aead_setkey,
			.setauthsize = cc_aead_setauthsize,
			.encrypt = cc_aead_encrypt,
			.decrypt = cc_aead_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CTR,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_XCBC_MAC,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "ccm(aes)",
		.driver_name = "ccm-aes-ccree",
		.blocksize = 1,
		.template_aead = {
			.setkey = cc_aead_setkey,
			.setauthsize = cc_ccm_setauthsize,
			.encrypt = cc_aead_encrypt,
			.decrypt = cc_aead_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CCM,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_NULL,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "rfc4309(ccm(aes))",
		.driver_name = "rfc4309-ccm-aes-ccree",
		.blocksize = 1,
		.template_aead = {
			.setkey = cc_rfc4309_ccm_setkey,
			.setauthsize = cc_rfc4309_ccm_setauthsize,
			.encrypt = cc_rfc4309_ccm_encrypt,
			.decrypt = cc_rfc4309_ccm_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = CCM_BLOCK_IV_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CCM,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_NULL,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "gcm(aes)",
		.driver_name = "gcm-aes-ccree",
		.blocksize = 1,
		.template_aead = {
			.setkey = cc_aead_setkey,
			.setauthsize = cc_gcm_setauthsize,
			.encrypt = cc_aead_encrypt,
			.decrypt = cc_aead_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = 12,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_GCTR,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_NULL,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "rfc4106(gcm(aes))",
		.driver_name = "rfc4106-gcm-aes-ccree",
		.blocksize = 1,
		.template_aead = {
			.setkey = cc_rfc4106_gcm_setkey,
			.setauthsize = cc_rfc4106_gcm_setauthsize,
			.encrypt = cc_rfc4106_gcm_encrypt,
			.decrypt = cc_rfc4106_gcm_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_GCTR,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_NULL,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "rfc4543(gcm(aes))",
		.driver_name = "rfc4543-gcm-aes-ccree",
		.blocksize = 1,
		.template_aead = {
			.setkey = cc_rfc4543_gcm_setkey,
			.setauthsize = cc_rfc4543_gcm_setauthsize,
			.encrypt = cc_rfc4543_gcm_encrypt,
			.decrypt = cc_rfc4543_gcm_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_GCTR,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_NULL,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
};

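/*
 * End of the AEAD template table. cc_aead_alloc() below walks this table
 * and registers each entry whose min_hw_rev and std_body match the probed
 * hardware.
 *
 * Illustrative sketch, not driver code: a kernel consumer reaches one of
 * these implementations through the generic AEAD API (key/keylen here are
 * caller-supplied placeholders):
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *
 *	if (!IS_ERR(tfm)) {
 *		crypto_aead_setkey(tfm, key, keylen);
 *		crypto_aead_setauthsize(tfm, 16);
 *		... per-request work via aead_request_alloc() and
 *		    crypto_aead_encrypt() / crypto_aead_decrypt() ...
 *		crypto_free_aead(tfm);
 *	}
 *
 * The crypto core resolves "gcm(aes)" to "gcm-aes-ccree" when this driver
 * offers the highest-priority implementation; callers that mask out
 * CRYPTO_ALG_ASYNC will skip it, since all entries are registered async.
 */

/**
 * cc_create_aead_alg() - Build a cc_crypto_alg wrapper from a template
 * entry.
 * @tmpl: template describing the AEAD algorithm
 * @dev: associated device (currently unused here)
 *
 * Copies the aead_alg template and fills in the crypto API boilerplate:
 * names, module, priority, context size and flags, plus the hardware
 * cipher/flow/auth modes consumed when building HW descriptors.
 *
 * Return: the new wrapper on success, ERR_PTR(-ENOMEM) on allocation
 * failure.
 */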
static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
						struct device *dev)
{
	struct cc_crypto_alg *t_alg;
	struct aead_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	alg = &tmpl->template_aead;

	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 tmpl->driver_name);
	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CC_CRA_PRIO;

	alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
	alg->init = cc_aead_init;
	alg->exit = cc_aead_exit;

	t_alg->aead_alg = *alg;

	t_alg->cipher_mode = tmpl->cipher_mode;
	t_alg->flow_mode = tmpl->flow_mode;
	t_alg->auth_mode = tmpl->auth_mode;

	return t_alg;
}

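/**
 * cc_aead_free() - Unregister all registered AEAD algorithms and free the
 * AEAD handle.
 * @drvdata: driver private context
 *
 * Only algorithms that registered successfully sit on aead_list, so this
 * is safe to call from the cc_aead_alloc() error paths. Always returns 0.
 */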
int cc_aead_free(struct cc_drvdata *drvdata)
{
	struct cc_crypto_alg *t_alg, *n;
	struct cc_aead_handle *aead_handle = drvdata->aead_handle;

	if (aead_handle) {
		/* Remove registered algs */
		list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list,
					 entry) {
			crypto_unregister_aead(&t_alg->aead_alg);
			list_del(&t_alg->entry);
			kfree(t_alg);
		}
		kfree(aead_handle);
		drvdata->aead_handle = NULL;
	}

	return 0;
}

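/**
 * cc_aead_alloc() - Register the AEAD algorithms supported by this device.
 * @drvdata: driver private context
 *
 * Reserves a small SRAM workspace (MAX_HMAC_DIGEST_SIZE bytes) used for
 * intermediate digests, then registers every template in aead_algs whose
 * minimum HW revision and standard body match the probed hardware.
 *
 * Return: 0 on success, -ENOMEM on allocation or SRAM exhaustion, or the
 * error returned by crypto_register_aead().
 */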
int cc_aead_alloc(struct cc_drvdata *drvdata)
{
	struct cc_aead_handle *aead_handle;
	struct cc_crypto_alg *t_alg;
	int rc = -ENOMEM;
	int alg;
	struct device *dev = drvdata_to_dev(drvdata);

	aead_handle = kmalloc(sizeof(*aead_handle), GFP_KERNEL);
	if (!aead_handle)
		goto fail0;

	INIT_LIST_HEAD(&aead_handle->aead_list);
	drvdata->aead_handle = aead_handle;

	aead_handle->sram_workspace_addr = cc_sram_alloc(drvdata,
							 MAX_HMAC_DIGEST_SIZE);

	if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
		dev_err(dev, "SRAM pool exhausted\n");
		goto fail1;
	}

	/* Register with the Linux crypto API every template this HW
	 * revision and standard body supports.
	 */
	for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
		if ((aead_algs[alg].min_hw_rev > drvdata->hw_rev) ||
		    !(drvdata->std_bodies & aead_algs[alg].std_body))
			continue;

		t_alg = cc_create_aead_alg(&aead_algs[alg], dev);
		if (IS_ERR(t_alg)) {
			rc = PTR_ERR(t_alg);
			dev_err(dev, "%s alg allocation failed\n",
				aead_algs[alg].driver_name);
			goto fail1;
		}
		t_alg->drvdata = drvdata;
		rc = crypto_register_aead(&t_alg->aead_alg);
		if (rc) {
			dev_err(dev, "%s alg registration failed\n",
				t_alg->aead_alg.base.cra_driver_name);
			goto fail2;
		}
		list_add_tail(&t_alg->entry, &aead_handle->aead_list);
		dev_dbg(dev, "Registered %s\n",
			t_alg->aead_alg.base.cra_driver_name);
	}

	return 0;

fail2:
	/* t_alg never made it onto aead_list, so free it directly */
	kfree(t_alg);
fail1:
	cc_aead_free(drvdata);
fail0:
	return rc;
}