/* linux-2.6-microblaze.git: drivers/staging/ccree/ssi_hash.c */
/*
 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/hash.h>

#include "ssi_config.h"
#include "ssi_driver.h"
#include "ssi_request_mgr.h"
#include "ssi_buffer_mgr.h"
#include "ssi_sysfs.h"
#include "ssi_hash.h"
#include "ssi_sram_mgr.h"

#define SSI_MAX_AHASH_SEQ_LEN 12
#define SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE SSI_MAX_HASH_BLCK_SIZE

struct ssi_hash_handle {
        ssi_sram_addr_t digest_len_sram_addr; /* const value in SRAM */
        ssi_sram_addr_t larval_digest_sram_addr; /* const value in SRAM */
        struct list_head hash_list;
        struct completion init_comp;
};

static const u32 digest_len_init[] = {
        0x00000040, 0x00000000, 0x00000000, 0x00000000 };
static const u32 md5_init[] = {
        SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
static const u32 sha1_init[] = {
        SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
static const u32 sha224_init[] = {
        SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
        SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
static const u32 sha256_init[] = {
        SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
        SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
#if (DX_DEV_SHA_MAX > 256)
static const u32 digest_len_sha512_init[] = {
        0x00000080, 0x00000000, 0x00000000, 0x00000000 };
static const u64 sha384_init[] = {
        SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
        SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
static const u64 sha512_init[] = {
        SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
        SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
#endif
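
/*
 * A note on the tables above: the larval (initial) digest words are
 * listed in reverse order relative to FIPS 180-4 / RFC 1321 (H7..H0
 * rather than H0..H7), which appears to be the word order the SRAM
 * image expects. md5_init can reuse the SHA1_H* constants because
 * MD5's four initial state words are identical to SHA-1's H0..H3.
 */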

static void ssi_hash_create_xcbc_setup(
        struct ahash_request *areq,
        struct cc_hw_desc desc[],
        unsigned int *seq_size);

static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
                                       struct cc_hw_desc desc[],
                                       unsigned int *seq_size);

struct ssi_hash_alg {
        struct list_head entry;
        int hash_mode;
        int hw_mode;
        int inter_digestsize;
        struct ssi_drvdata *drvdata;
        struct ahash_alg ahash_alg;
};

struct hash_key_req_ctx {
        u32 keylen;
        dma_addr_t key_dma_addr;
};

/* hash per-session context */
struct ssi_hash_ctx {
        struct ssi_drvdata *drvdata;
        /* holds the origin digest; the digest after "setkey" if HMAC,
         * the initial digest if HASH.
         */
        u8 digest_buff[SSI_MAX_HASH_DIGEST_SIZE]  ____cacheline_aligned;
        u8 opad_tmp_keys_buff[SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE]  ____cacheline_aligned;

        dma_addr_t opad_tmp_keys_dma_addr  ____cacheline_aligned;
        dma_addr_t digest_buff_dma_addr;
        /* used for HMAC when the key is larger than the hash block size */
        struct hash_key_req_ctx key_params;
        int hash_mode;
        int hw_mode;
        int inter_digestsize;
        struct completion setkey_comp;
        bool is_hmac;
};

static void ssi_hash_create_data_desc(
        struct ahash_req_ctx *areq_ctx,
        struct ssi_hash_ctx *ctx,
        unsigned int flow_mode, struct cc_hw_desc desc[],
        bool is_not_last_data,
        unsigned int *seq_size);

static inline void ssi_set_hash_endianity(u32 mode, struct cc_hw_desc *desc)
{
        if (unlikely(mode == DRV_HASH_MD5 ||
                     mode == DRV_HASH_SHA384 ||
                     mode == DRV_HASH_SHA512)) {
                set_bytes_swap(desc, 1);
        } else {
                set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
        }
}
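
/*
 * Why the byte swap above: MD5 produces a little-endian digest while
 * the SHA family is big-endian, and SHA-384/512 use 64-bit state
 * words. The engine's default DOUT ordering evidently matches
 * SHA-1/SHA-224/SHA-256, so the remaining modes need their result
 * bytes swapped to match the digest layout the crypto API expects.
 */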

static int ssi_hash_map_result(struct device *dev,
                               struct ahash_req_ctx *state,
                               unsigned int digestsize)
{
        state->digest_result_dma_addr =
                dma_map_single(dev, (void *)state->digest_result_buff,
                               digestsize,
                               DMA_BIDIRECTIONAL);
        if (unlikely(dma_mapping_error(dev, state->digest_result_dma_addr))) {
                dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
                        digestsize);
                return -ENOMEM;
        }
        dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
                digestsize, state->digest_result_buff,
                &state->digest_result_dma_addr);

        return 0;
}

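/*
 * Allocate and DMA-map the per-request intermediate state: two staging
 * buffers for partial blocks, the result buffer, the intermediate
 * digest and (except for XCBC-MAC) the running byte count, plus the
 * opad digest for HMAC. For HMAC the intermediate digest is seeded
 * from the tfm context; for a plain hash it is loaded from the larval
 * digest image in SRAM through a BYPASS descriptor.
 */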
static int ssi_hash_map_request(struct device *dev,
                                struct ahash_req_ctx *state,
                                struct ssi_hash_ctx *ctx)
{
        bool is_hmac = ctx->is_hmac;
        ssi_sram_addr_t larval_digest_addr = ssi_ahash_get_larval_digest_sram_addr(
                                        ctx->drvdata, ctx->hash_mode);
        struct ssi_crypto_req ssi_req = {};
        struct cc_hw_desc desc;
        int rc = -ENOMEM;

        state->buff0 = kzalloc(SSI_MAX_HASH_BLCK_SIZE, GFP_KERNEL | GFP_DMA);
        if (!state->buff0)
                goto fail0;

        state->buff1 = kzalloc(SSI_MAX_HASH_BLCK_SIZE, GFP_KERNEL | GFP_DMA);
        if (!state->buff1)
                goto fail_buff0;

        state->digest_result_buff = kzalloc(SSI_MAX_HASH_DIGEST_SIZE, GFP_KERNEL | GFP_DMA);
        if (!state->digest_result_buff)
                goto fail_buff1;

        state->digest_buff = kzalloc(ctx->inter_digestsize, GFP_KERNEL | GFP_DMA);
        if (!state->digest_buff)
                goto fail_digest_result_buff;

        dev_dbg(dev, "Allocated digest-buffer in context state->digest_buff=@%p\n",
                state->digest_buff);
        if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
                state->digest_bytes_len = kzalloc(HASH_LEN_SIZE, GFP_KERNEL | GFP_DMA);
                if (!state->digest_bytes_len)
                        goto fail1;

                dev_dbg(dev, "Allocated digest-bytes-len in context state->digest_bytes_len=@%p\n",
                        state->digest_bytes_len);
        } else {
                state->digest_bytes_len = NULL;
        }

        state->opad_digest_buff = kzalloc(ctx->inter_digestsize, GFP_KERNEL | GFP_DMA);
        if (!state->opad_digest_buff)
                goto fail2;

        dev_dbg(dev, "Allocated opad-digest-buffer in context state->opad_digest_buff=@%p\n",
                state->opad_digest_buff);

        state->digest_buff_dma_addr = dma_map_single(dev, (void *)state->digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
                dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n",
                        ctx->inter_digestsize, state->digest_buff);
                goto fail3;
        }
        dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
                ctx->inter_digestsize, state->digest_buff,
                &state->digest_buff_dma_addr);

        if (is_hmac) {
                dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
                if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC || ctx->hw_mode == DRV_CIPHER_CMAC) {
                        memset(state->digest_buff, 0, ctx->inter_digestsize);
                } else { /*sha*/
                        memcpy(state->digest_buff, ctx->digest_buff, ctx->inter_digestsize);
#if (DX_DEV_SHA_MAX > 256)
                        if (unlikely(ctx->hash_mode == DRV_HASH_SHA512 || ctx->hash_mode == DRV_HASH_SHA384))
                                memcpy(state->digest_bytes_len, digest_len_sha512_init, HASH_LEN_SIZE);
                        else
                                memcpy(state->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
#else
                        memcpy(state->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
#endif
                }
                dma_sync_single_for_device(dev, state->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);

                if (ctx->hash_mode != DRV_HASH_NULL) {
                        dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
                        memcpy(state->opad_digest_buff, ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
                }
        } else { /*hash*/
                /* Copy the initial digests if hash flow. The SRAM contains the
                 * initial digests in the expected order for all SHA*
                 */
                hw_desc_init(&desc);
                set_din_sram(&desc, larval_digest_addr, ctx->inter_digestsize);
                set_dout_dlli(&desc, state->digest_buff_dma_addr,
                              ctx->inter_digestsize, NS_BIT, 0);
                set_flow_mode(&desc, BYPASS);

                rc = send_request(ctx->drvdata, &ssi_req, &desc, 1, 0);
                if (unlikely(rc)) {
                        dev_err(dev, "send_request() failed (rc=%d)\n", rc);
                        goto fail4;
                }
        }

        if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
                state->digest_bytes_len_dma_addr = dma_map_single(dev, (void *)state->digest_bytes_len, HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
                if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
                        dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
                                HASH_LEN_SIZE, state->digest_bytes_len);
                        goto fail4;
                }
                dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
                        HASH_LEN_SIZE, state->digest_bytes_len,
                        &state->digest_bytes_len_dma_addr);
        } else {
                state->digest_bytes_len_dma_addr = 0;
        }

        if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
                state->opad_digest_dma_addr = dma_map_single(dev, (void *)state->opad_digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
                if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
                        dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
                                ctx->inter_digestsize,
                                state->opad_digest_buff);
                        goto fail5;
                }
                dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
                        ctx->inter_digestsize, state->opad_digest_buff,
                        &state->opad_digest_dma_addr);
        } else {
                state->opad_digest_dma_addr = 0;
        }
        state->buff0_cnt = 0;
        state->buff1_cnt = 0;
        state->buff_index = 0;
        state->mlli_params.curr_pool = NULL;

        return 0;

fail5:
        if (state->digest_bytes_len_dma_addr) {
                dma_unmap_single(dev, state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
                state->digest_bytes_len_dma_addr = 0;
        }
fail4:
        if (state->digest_buff_dma_addr) {
                dma_unmap_single(dev, state->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
                state->digest_buff_dma_addr = 0;
        }
fail3:
        kfree(state->opad_digest_buff);
fail2:
        kfree(state->digest_bytes_len);
fail1:
        kfree(state->digest_buff);
fail_digest_result_buff:
        kfree(state->digest_result_buff);
        state->digest_result_buff = NULL;
fail_buff1:
        kfree(state->buff1);
        state->buff1 = NULL;
fail_buff0:
        kfree(state->buff0);
        state->buff0 = NULL;
fail0:
        return rc;
}

static void ssi_hash_unmap_request(struct device *dev,
                                   struct ahash_req_ctx *state,
                                   struct ssi_hash_ctx *ctx)
{
        if (state->digest_buff_dma_addr) {
                dma_unmap_single(dev, state->digest_buff_dma_addr,
                                 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
                dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
                        &state->digest_buff_dma_addr);
                state->digest_buff_dma_addr = 0;
        }
        if (state->digest_bytes_len_dma_addr) {
                dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
                                 HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
                dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
                        &state->digest_bytes_len_dma_addr);
                state->digest_bytes_len_dma_addr = 0;
        }
        if (state->opad_digest_dma_addr) {
                dma_unmap_single(dev, state->opad_digest_dma_addr,
                                 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
                dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
                        &state->opad_digest_dma_addr);
                state->opad_digest_dma_addr = 0;
        }

        kfree(state->opad_digest_buff);
        kfree(state->digest_bytes_len);
        kfree(state->digest_buff);
        kfree(state->digest_result_buff);
        kfree(state->buff1);
        kfree(state->buff0);
}

static void ssi_hash_unmap_result(struct device *dev,
                                  struct ahash_req_ctx *state,
                                  unsigned int digestsize, u8 *result)
{
        if (state->digest_result_dma_addr) {
                dma_unmap_single(dev,
                                 state->digest_result_dma_addr,
                                 digestsize,
                                 DMA_BIDIRECTIONAL);
                dev_dbg(dev, "unmap digest result buffer: va (%pK) pa (%pad) len %u\n",
                        state->digest_result_buff,
                        &state->digest_result_dma_addr, digestsize);
                memcpy(result,
                       state->digest_result_buff,
                       digestsize);
        }
        state->digest_result_dma_addr = 0;
}

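/*
 * Completion callbacks, invoked by the request manager once the HW has
 * executed the descriptor sequence. Each unmaps the DMA resources taken
 * for the request before completing it towards the crypto API; the
 * digest/final paths also release the per-request state buffers.
 */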
static void ssi_hash_update_complete(struct device *dev, void *ssi_req)
{
        struct ahash_request *req = (struct ahash_request *)ssi_req;
        struct ahash_req_ctx *state = ahash_request_ctx(req);

        dev_dbg(dev, "req=%pK\n", req);

        cc_unmap_hash_request(dev, state, req->src, false);
        req->base.complete(&req->base, 0);
}

static void ssi_hash_digest_complete(struct device *dev, void *ssi_req)
{
        struct ahash_request *req = (struct ahash_request *)ssi_req;
        struct ahash_req_ctx *state = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
        u32 digestsize = crypto_ahash_digestsize(tfm);

        dev_dbg(dev, "req=%pK\n", req);

        cc_unmap_hash_request(dev, state, req->src, false);
        ssi_hash_unmap_result(dev, state, digestsize, req->result);
        ssi_hash_unmap_request(dev, state, ctx);
        req->base.complete(&req->base, 0);
}

static void ssi_hash_complete(struct device *dev, void *ssi_req)
{
        struct ahash_request *req = (struct ahash_request *)ssi_req;
        struct ahash_req_ctx *state = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
        u32 digestsize = crypto_ahash_digestsize(tfm);

        dev_dbg(dev, "req=%pK\n", req);

        cc_unmap_hash_request(dev, state, req->src, false);
        ssi_hash_unmap_result(dev, state, digestsize, req->result);
        ssi_hash_unmap_request(dev, state, ctx);
        req->base.complete(&req->base, 0);
}

static int ssi_hash_digest(struct ahash_req_ctx *state,
                           struct ssi_hash_ctx *ctx,
                           unsigned int digestsize,
                           struct scatterlist *src,
                           unsigned int nbytes, u8 *result,
                           void *async_req)
{
        struct device *dev = drvdata_to_dev(ctx->drvdata);
        bool is_hmac = ctx->is_hmac;
        struct ssi_crypto_req ssi_req = {};
        struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
        ssi_sram_addr_t larval_digest_addr = ssi_ahash_get_larval_digest_sram_addr(
                                        ctx->drvdata, ctx->hash_mode);
        int idx = 0;
        int rc = 0;

        dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
                nbytes);

        if (unlikely(ssi_hash_map_request(dev, state, ctx))) {
                dev_err(dev, "map_ahash_source() failed\n");
                return -ENOMEM;
        }

        if (unlikely(ssi_hash_map_result(dev, state, digestsize))) {
                dev_err(dev, "map_ahash_digest() failed\n");
                return -ENOMEM;
        }

        if (unlikely(cc_map_hash_request_final(ctx->drvdata, state,
                                               src, nbytes, 1))) {
                dev_err(dev, "map_ahash_request_final() failed\n");
                return -ENOMEM;
        }

        if (async_req) {
                /* Setup DX request structure */
                ssi_req.user_cb = (void *)ssi_hash_digest_complete;
                ssi_req.user_arg = (void *)async_req;
        }

        /* If HMAC then load hash IPAD xor key, if HASH then load initial digest */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], ctx->hw_mode);
        if (is_hmac) {
                set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
                             ctx->inter_digestsize, NS_BIT);
        } else {
                set_din_sram(&desc[idx], larval_digest_addr,
                             ctx->inter_digestsize);
        }
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
        idx++;

        /* Load the hash current length */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], ctx->hw_mode);

        if (is_hmac) {
                set_din_type(&desc[idx], DMA_DLLI,
                             state->digest_bytes_len_dma_addr, HASH_LEN_SIZE,
                             NS_BIT);
        } else {
                set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
                if (likely(nbytes))
                        set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
                else
                        set_cipher_do(&desc[idx], DO_PAD);
        }
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
        idx++;

        ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);

        if (is_hmac) {
                /* HW last hash block padding (aka. "DO_PAD") */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], ctx->hw_mode);
                set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
                              HASH_LEN_SIZE, NS_BIT, 0);
                set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
                set_cipher_do(&desc[idx], DO_PAD);
                idx++;

                /* store the hash digest result in the context */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], ctx->hw_mode);
                set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
                              digestsize, NS_BIT, 0);
                set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
                set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
                idx++;

                /* Loading hash opad xor key state */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], ctx->hw_mode);
                set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
                             ctx->inter_digestsize, NS_BIT);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
                idx++;

                /* Load the hash current length */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], ctx->hw_mode);
                set_din_sram(&desc[idx],
                             ssi_ahash_get_initial_digest_len_sram_addr(
                                     ctx->drvdata, ctx->hash_mode),
                             HASH_LEN_SIZE);
                set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
                idx++;

                /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
                hw_desc_init(&desc[idx]);
                set_din_no_dma(&desc[idx], 0, 0xfffff0);
                set_dout_no_dma(&desc[idx], 0, 0, 1);
                idx++;

                /* Perform HASH update */
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
                             digestsize, NS_BIT);
                set_flow_mode(&desc[idx], DIN_HASH);
                idx++;
        }

        /* Get final MAC result */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], ctx->hw_mode);
        /* TODO */
        set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
                      NS_BIT, (async_req ? 1 : 0));
        if (async_req)
                set_queue_last_ind(&desc[idx]);
        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
        set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
        set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
        ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
        idx++;

        if (async_req) {
                rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
                if (unlikely(rc != -EINPROGRESS)) {
                        dev_err(dev, "send_request() failed (rc=%d)\n", rc);
                        cc_unmap_hash_request(dev, state, src, true);
                        ssi_hash_unmap_result(dev, state, digestsize, result);
                        ssi_hash_unmap_request(dev, state, ctx);
                }
        } else {
                rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
                if (rc) {
                        dev_err(dev, "send_request() failed (rc=%d)\n", rc);
                        cc_unmap_hash_request(dev, state, src, true);
                } else {
                        cc_unmap_hash_request(dev, state, src, false);
                }
                ssi_hash_unmap_result(dev, state, digestsize, result);
                ssi_hash_unmap_request(dev, state, ctx);
        }
        return rc;
}

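/*
 * Update flow: restore the intermediate digest and running length into
 * the engine, stream the new data through DIN_HASH, then write the
 * updated digest and length back out for the next update/final call.
 */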
static int ssi_hash_update(struct ahash_req_ctx *state,
                           struct ssi_hash_ctx *ctx,
                           unsigned int block_size,
                           struct scatterlist *src,
                           unsigned int nbytes,
                           void *async_req)
{
        struct device *dev = drvdata_to_dev(ctx->drvdata);
        struct ssi_crypto_req ssi_req = {};
        struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
        u32 idx = 0;
        int rc;

        dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
                "hmac" : "hash", nbytes);

        if (nbytes == 0) {
                /* no real updates required */
                return 0;
        }

        rc = cc_map_hash_request_update(ctx->drvdata, state, src, nbytes,
                                        block_size);
        if (unlikely(rc)) {
                if (rc == 1) {
                        dev_dbg(dev, "data size doesn't require HW update %x\n",
                                nbytes);
                        /* No hardware updates are required */
                        return 0;
                }
                dev_err(dev, "map_ahash_request_update() failed\n");
                return -ENOMEM;
        }

        if (async_req) {
                /* Setup DX request structure */
                ssi_req.user_cb = (void *)ssi_hash_update_complete;
                ssi_req.user_arg = async_req;
        }

        /* Restore hash digest */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], ctx->hw_mode);
        set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
                     ctx->inter_digestsize, NS_BIT);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
        idx++;
        /* Restore hash current length */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], ctx->hw_mode);
        set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
                     HASH_LEN_SIZE, NS_BIT);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
        idx++;

        ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);

        /* store the hash digest result in context */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], ctx->hw_mode);
        set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
                      ctx->inter_digestsize, NS_BIT, 0);
        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
        set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
        idx++;

        /* store current hash length in context */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], ctx->hw_mode);
        set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
                      HASH_LEN_SIZE, NS_BIT, (async_req ? 1 : 0));
        if (async_req)
                set_queue_last_ind(&desc[idx]);
        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
        set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
        idx++;

        if (async_req) {
                rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
                if (unlikely(rc != -EINPROGRESS)) {
                        dev_err(dev, "send_request() failed (rc=%d)\n", rc);
                        cc_unmap_hash_request(dev, state, src, true);
                }
        } else {
                rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
                if (rc) {
                        dev_err(dev, "send_request() failed (rc=%d)\n", rc);
                        cc_unmap_hash_request(dev, state, src, true);
                } else {
                        cc_unmap_hash_request(dev, state, src, false);
                }
        }
        return rc;
}

static int ssi_hash_finup(struct ahash_req_ctx *state,
                          struct ssi_hash_ctx *ctx,
                          unsigned int digestsize,
                          struct scatterlist *src,
                          unsigned int nbytes,
                          u8 *result,
                          void *async_req)
{
        struct device *dev = drvdata_to_dev(ctx->drvdata);
        bool is_hmac = ctx->is_hmac;
        struct ssi_crypto_req ssi_req = {};
        struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
        int idx = 0;
        int rc;

        dev_dbg(dev, "===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash",
                nbytes);

        if (unlikely(cc_map_hash_request_final(ctx->drvdata, state, src,
                                               nbytes, 1))) {
                dev_err(dev, "map_ahash_request_final() failed\n");
                return -ENOMEM;
        }
        if (unlikely(ssi_hash_map_result(dev, state, digestsize))) {
                dev_err(dev, "map_ahash_digest() failed\n");
                return -ENOMEM;
        }

        if (async_req) {
                /* Setup DX request structure */
                ssi_req.user_cb = (void *)ssi_hash_complete;
                ssi_req.user_arg = async_req;
        }

        /* Restore hash digest */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], ctx->hw_mode);
        set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
                     ctx->inter_digestsize, NS_BIT);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
        idx++;

        /* Restore hash current length */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], ctx->hw_mode);
        set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
        set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
                     HASH_LEN_SIZE, NS_BIT);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
        idx++;

        ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);

        if (is_hmac) {
                /* Store the hash digest result in the context */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], ctx->hw_mode);
                set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
                              digestsize, NS_BIT, 0);
                ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
                set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
                idx++;

                /* Loading hash OPAD xor key state */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], ctx->hw_mode);
                set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
                             ctx->inter_digestsize, NS_BIT);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
                idx++;

                /* Load the hash current length */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], ctx->hw_mode);
                set_din_sram(&desc[idx],
                             ssi_ahash_get_initial_digest_len_sram_addr(
                                     ctx->drvdata, ctx->hash_mode),
                             HASH_LEN_SIZE);
                set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
                idx++;

                /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
                hw_desc_init(&desc[idx]);
                set_din_no_dma(&desc[idx], 0, 0xfffff0);
                set_dout_no_dma(&desc[idx], 0, 0, 1);
                idx++;

                /* Perform HASH update on last digest */
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
                             digestsize, NS_BIT);
                set_flow_mode(&desc[idx], DIN_HASH);
                idx++;
        }

        /* Get final MAC result */
        hw_desc_init(&desc[idx]);
        /* TODO */
        set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
                      NS_BIT, (async_req ? 1 : 0));
        if (async_req)
                set_queue_last_ind(&desc[idx]);
        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
        set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
        set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
        ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
        set_cipher_mode(&desc[idx], ctx->hw_mode);
        idx++;

        if (async_req) {
                rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
                if (unlikely(rc != -EINPROGRESS)) {
                        dev_err(dev, "send_request() failed (rc=%d)\n", rc);
                        cc_unmap_hash_request(dev, state, src, true);
                        ssi_hash_unmap_result(dev, state, digestsize, result);
                }
        } else {
                rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
                if (rc) {
                        dev_err(dev, "send_request() failed (rc=%d)\n", rc);
                        cc_unmap_hash_request(dev, state, src, true);
                        ssi_hash_unmap_result(dev, state, digestsize, result);
                } else {
                        cc_unmap_hash_request(dev, state, src, false);
                        ssi_hash_unmap_result(dev, state, digestsize, result);
                        ssi_hash_unmap_request(dev, state, ctx);
                }
        }
        return rc;
}

static int ssi_hash_final(struct ahash_req_ctx *state,
                          struct ssi_hash_ctx *ctx,
                          unsigned int digestsize,
                          struct scatterlist *src,
                          unsigned int nbytes,
                          u8 *result,
                          void *async_req)
{
        struct device *dev = drvdata_to_dev(ctx->drvdata);
        bool is_hmac = ctx->is_hmac;
        struct ssi_crypto_req ssi_req = {};
        struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
        int idx = 0;
        int rc;

        dev_dbg(dev, "===== %s-final (%d) ====\n", is_hmac ? "hmac" : "hash",
                nbytes);

        if (unlikely(cc_map_hash_request_final(ctx->drvdata, state, src,
                                               nbytes, 0))) {
                dev_err(dev, "map_ahash_request_final() failed\n");
                return -ENOMEM;
        }

        if (unlikely(ssi_hash_map_result(dev, state, digestsize))) {
                dev_err(dev, "map_ahash_digest() failed\n");
                return -ENOMEM;
        }

        if (async_req) {
                /* Setup DX request structure */
                ssi_req.user_cb = (void *)ssi_hash_complete;
                ssi_req.user_arg = async_req;
        }

        /* Restore hash digest */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], ctx->hw_mode);
        set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
                     ctx->inter_digestsize, NS_BIT);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
        idx++;

        /* Restore hash current length */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], ctx->hw_mode);
        set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
        set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
                     HASH_LEN_SIZE, NS_BIT);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
        idx++;

        ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);

        /* "DO-PAD" must be enabled only when writing current length to HW */
        hw_desc_init(&desc[idx]);
        set_cipher_do(&desc[idx], DO_PAD);
        set_cipher_mode(&desc[idx], ctx->hw_mode);
        set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
                      HASH_LEN_SIZE, NS_BIT, 0);
        set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
        idx++;

        if (is_hmac) {
                /* Store the hash digest result in the context */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], ctx->hw_mode);
                set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
                              digestsize, NS_BIT, 0);
                ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
                set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
                idx++;

                /* Loading hash OPAD xor key state */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], ctx->hw_mode);
                set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
                             ctx->inter_digestsize, NS_BIT);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
                idx++;

                /* Load the hash current length */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], ctx->hw_mode);
                set_din_sram(&desc[idx],
                             ssi_ahash_get_initial_digest_len_sram_addr(
                                     ctx->drvdata, ctx->hash_mode),
                             HASH_LEN_SIZE);
                set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
                idx++;

                /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
                hw_desc_init(&desc[idx]);
                set_din_no_dma(&desc[idx], 0, 0xfffff0);
                set_dout_no_dma(&desc[idx], 0, 0, 1);
                idx++;

                /* Perform HASH update on last digest */
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
                             digestsize, NS_BIT);
                set_flow_mode(&desc[idx], DIN_HASH);
                idx++;
        }

        /* Get final MAC result */
        hw_desc_init(&desc[idx]);
        set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
                      NS_BIT, (async_req ? 1 : 0));
        if (async_req)
                set_queue_last_ind(&desc[idx]);
        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
        set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
        set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
        ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
        set_cipher_mode(&desc[idx], ctx->hw_mode);
        idx++;

        if (async_req) {
                rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
                if (unlikely(rc != -EINPROGRESS)) {
                        dev_err(dev, "send_request() failed (rc=%d)\n", rc);
                        cc_unmap_hash_request(dev, state, src, true);
                        ssi_hash_unmap_result(dev, state, digestsize, result);
                }
        } else {
                rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
                if (rc) {
                        dev_err(dev, "send_request() failed (rc=%d)\n", rc);
                        cc_unmap_hash_request(dev, state, src, true);
                        ssi_hash_unmap_result(dev, state, digestsize, result);
                } else {
                        cc_unmap_hash_request(dev, state, src, false);
                        ssi_hash_unmap_result(dev, state, digestsize, result);
                        ssi_hash_unmap_request(dev, state, ctx);
                }
        }
        return rc;
}

static int ssi_hash_init(struct ahash_req_ctx *state, struct ssi_hash_ctx *ctx)
{
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        state->xcbc_count = 0;

        return ssi_hash_map_request(dev, state, ctx);
}

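/*
 * HMAC key preparation per RFC 2104: the raw key is first turned into
 * a block-sized K0 (hashed if longer than the block size, then
 * zero-padded), after which the ipad/opad intermediate digests are
 * derived. A rough software equivalent (names illustrative only):
 *
 *     k0 = (keylen > blocksize) ? hash(key) : key;  // then zero-padded
 *     for (i = 0; i < blocksize; i++) {
 *         ipad[i] = k0[i] ^ 0x36;
 *         opad[i] = k0[i] ^ 0x5c;
 *     }
 *     ipad_state = compress(iv, ipad);  // kept in ctx->digest_buff
 *     opad_state = compress(iv, opad);  // kept in ctx->opad_tmp_keys_buff
 *
 * Below, the same computation is driven through the engine: the first
 * descriptor batch stages K0 in opad_tmp_keys_buff, and the
 * two-iteration loop derives the ipad/opad states using the HW XOR
 * datapath.
 */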
static int ssi_hash_setkey(void *hash,
                           const u8 *key,
                           unsigned int keylen,
                           bool synchronize)
{
        unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
        struct ssi_crypto_req ssi_req = {};
        struct ssi_hash_ctx *ctx = NULL;
        int blocksize = 0;
        int digestsize = 0;
        int i, idx = 0, rc = 0;
        struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
        ssi_sram_addr_t larval_addr;
        struct device *dev;

        ctx = crypto_ahash_ctx(((struct crypto_ahash *)hash));
        dev = drvdata_to_dev(ctx->drvdata);
        dev_dbg(dev, "start keylen: %d", keylen);

        blocksize = crypto_tfm_alg_blocksize(&((struct crypto_ahash *)hash)->base);
        digestsize = crypto_ahash_digestsize(((struct crypto_ahash *)hash));

        larval_addr = ssi_ahash_get_larval_digest_sram_addr(
                                        ctx->drvdata, ctx->hash_mode);

        /* A zero keylen selects the plain HASH flow; any non-zero
         * keylen selects the HMAC flow.
         */
        ctx->key_params.keylen = keylen;
        ctx->key_params.key_dma_addr = 0;
        ctx->is_hmac = true;

        if (keylen) {
                ctx->key_params.key_dma_addr = dma_map_single(
                                                dev, (void *)key,
                                                keylen, DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(dev,
                                               ctx->key_params.key_dma_addr))) {
                        dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
                                key, keylen);
                        return -ENOMEM;
                }
                dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
                        &ctx->key_params.key_dma_addr, ctx->key_params.keylen);

                if (keylen > blocksize) {
                        /* Load hash initial state */
                        hw_desc_init(&desc[idx]);
                        set_cipher_mode(&desc[idx], ctx->hw_mode);
                        set_din_sram(&desc[idx], larval_addr,
                                     ctx->inter_digestsize);
                        set_flow_mode(&desc[idx], S_DIN_to_HASH);
                        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
                        idx++;

                        /* Load the hash current length */
                        hw_desc_init(&desc[idx]);
                        set_cipher_mode(&desc[idx], ctx->hw_mode);
                        set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
                        set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
                        set_flow_mode(&desc[idx], S_DIN_to_HASH);
                        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
                        idx++;

                        hw_desc_init(&desc[idx]);
                        set_din_type(&desc[idx], DMA_DLLI,
                                     ctx->key_params.key_dma_addr, keylen,
                                     NS_BIT);
                        set_flow_mode(&desc[idx], DIN_HASH);
                        idx++;

                        /* Get hashed key */
                        hw_desc_init(&desc[idx]);
                        set_cipher_mode(&desc[idx], ctx->hw_mode);
                        set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
                                      digestsize, NS_BIT, 0);
                        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                        set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
                        set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
                        ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
                        idx++;

                        hw_desc_init(&desc[idx]);
                        set_din_const(&desc[idx], 0, (blocksize - digestsize));
                        set_flow_mode(&desc[idx], BYPASS);
                        set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
                                                   digestsize),
                                      (blocksize - digestsize), NS_BIT, 0);
                        idx++;
                } else {
                        hw_desc_init(&desc[idx]);
                        set_din_type(&desc[idx], DMA_DLLI,
                                     ctx->key_params.key_dma_addr, keylen,
                                     NS_BIT);
                        set_flow_mode(&desc[idx], BYPASS);
                        set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
                                      keylen, NS_BIT, 0);
                        idx++;

                        if ((blocksize - keylen)) {
                                hw_desc_init(&desc[idx]);
                                set_din_const(&desc[idx], 0,
                                              (blocksize - keylen));
                                set_flow_mode(&desc[idx], BYPASS);
                                set_dout_dlli(&desc[idx],
                                              (ctx->opad_tmp_keys_dma_addr +
                                               keylen), (blocksize - keylen),
                                              NS_BIT, 0);
                                idx++;
                        }
                }
        } else {
                hw_desc_init(&desc[idx]);
                set_din_const(&desc[idx], 0, blocksize);
                set_flow_mode(&desc[idx], BYPASS);
                set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr),
                              blocksize, NS_BIT, 0);
                idx++;
        }

        rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
        if (unlikely(rc)) {
                dev_err(dev, "send_request() failed (rc=%d)\n", rc);
                goto out;
        }

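        /* Two iterations below: i = 0 derives the IPAD state
         * (HMAC_IPAD_CONST), i = 1 the OPAD state (HMAC_OPAD_CONST);
         * the engine XORs K0 with the pad constant and the resulting
         * partial digest is written back out.
         */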
        /* calc derived HMAC key */
        for (idx = 0, i = 0; i < 2; i++) {
                /* Load hash initial state */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], ctx->hw_mode);
                set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
                idx++;

                /* Load the hash current length */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], ctx->hw_mode);
                set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
                idx++;

                /* Prepare ipad key */
                hw_desc_init(&desc[idx]);
                set_xor_val(&desc[idx], hmac_pad_const[i]);
                set_cipher_mode(&desc[idx], ctx->hw_mode);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
                idx++;

                /* Perform HASH update */
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
                             blocksize, NS_BIT);
                set_cipher_mode(&desc[idx], ctx->hw_mode);
                set_xor_active(&desc[idx]);
                set_flow_mode(&desc[idx], DIN_HASH);
                idx++;

                /* Get the IPAD/OPAD xor key (Note, IPAD is the initial
                 * digest of the first HASH "update" state)
                 */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], ctx->hw_mode);
                if (i > 0) /* Not first iteration */
                        set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
                                      ctx->inter_digestsize, NS_BIT, 0);
                else /* First iteration */
                        set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr,
                                      ctx->inter_digestsize, NS_BIT, 0);
                set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
                idx++;
        }

        rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);

out:
        if (rc)
                crypto_ahash_set_flags((struct crypto_ahash *)hash,
                                       CRYPTO_TFM_RES_BAD_KEY_LEN);

        if (ctx->key_params.key_dma_addr) {
                dma_unmap_single(dev, ctx->key_params.key_dma_addr,
                                 ctx->key_params.keylen, DMA_TO_DEVICE);
                dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
                        &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
        }
        return rc;
}

static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
                           const u8 *key, unsigned int keylen)
{
        struct ssi_crypto_req ssi_req = {};
        struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct device *dev = drvdata_to_dev(ctx->drvdata);
        int idx = 0, rc = 0;
        struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];

        dev_dbg(dev, "===== setkey (%d) ====\n", keylen);

        switch (keylen) {
        case AES_KEYSIZE_128:
        case AES_KEYSIZE_192:
        case AES_KEYSIZE_256:
                break;
        default:
                return -EINVAL;
        }

        ctx->key_params.keylen = keylen;

        ctx->key_params.key_dma_addr = dma_map_single(
                                        dev, (void *)key,
                                        keylen, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, ctx->key_params.key_dma_addr))) {
                dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
                        key, keylen);
                return -ENOMEM;
        }
        dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
                &ctx->key_params.key_dma_addr, ctx->key_params.keylen);

        ctx->is_hmac = true;
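
        /* XCBC-MAC subkeys per RFC 3566: K1, K2 and K3 are the AES
         * encryptions of the constant blocks 0x01..01, 0x02..02 and
         * 0x03..03 under the user key. The descriptors below load the
         * key and write each Ki straight into the opad_tmp_keys buffer
         * at its XCBC_MAC_K*_OFFSET.
         */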
        /* 1. Load the AES key */
        hw_desc_init(&desc[idx]);
        set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr,
                     keylen, NS_BIT);
        set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
        set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
        set_key_size_aes(&desc[idx], keylen);
        set_flow_mode(&desc[idx], S_DIN_to_AES);
        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
        idx++;

        hw_desc_init(&desc[idx]);
        set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[idx], DIN_AES_DOUT);
        set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
                                   XCBC_MAC_K1_OFFSET),
                      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
        idx++;

        hw_desc_init(&desc[idx]);
        set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[idx], DIN_AES_DOUT);
        set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
                                   XCBC_MAC_K2_OFFSET),
                      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
        idx++;

        hw_desc_init(&desc[idx]);
        set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[idx], DIN_AES_DOUT);
        set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
                                   XCBC_MAC_K3_OFFSET),
                      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
        idx++;

        rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);

        if (rc)
                crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);

        dma_unmap_single(dev, ctx->key_params.key_dma_addr,
                         ctx->key_params.keylen, DMA_TO_DEVICE);
        dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
                &ctx->key_params.key_dma_addr, ctx->key_params.keylen);

        return rc;
}

#if SSI_CC_HAS_CMAC
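/*
 * Unlike XCBC-MAC above, no subkeys are computed here: presumably the
 * engine derives the CMAC (RFC 4493) K1/K2 subkeys internally, so
 * setkey only stages the raw AES key, zero-padded for 192-bit keys,
 * in the context buffer.
 */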
1233 static int ssi_cmac_setkey(struct crypto_ahash *ahash,
1234                            const u8 *key, unsigned int keylen)
1235 {
1236         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1237         struct device *dev = drvdata_to_dev(ctx->drvdata);
1238
1239         dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
1240
1241         ctx->is_hmac = true;
1242
1243         switch (keylen) {
1244         case AES_KEYSIZE_128:
1245         case AES_KEYSIZE_192:
1246         case AES_KEYSIZE_256:
1247                 break;
1248         default:
1249                 return -EINVAL;
1250         }
1251
1252         ctx->key_params.keylen = keylen;
1253
1254         /* STAT_PHASE_1: Copy key to ctx */
1255
1256         dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr,
1257                                 keylen, DMA_TO_DEVICE);
1258
1259         memcpy(ctx->opad_tmp_keys_buff, key, keylen);
1260         if (keylen == AES_KEYSIZE_192)
1261                 memset(ctx->opad_tmp_keys_buff + keylen, 0,
                            CC_AES_KEY_SIZE_MAX - keylen);
1262
1263         dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
1264                                    keylen, DMA_TO_DEVICE);
1267
1268         return 0;
1269 }
1270 #endif
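
/*
 * Example: a minimal, hypothetical usage sketch of the keyed MAC path
 * through the generic kernel crypto API (the function and buffer names
 * below are illustrative, not part of this driver). The tfm is
 * asynchronous, so crypto_ahash_digest() may return -EINPROGRESS and a
 * real caller must wait for the request callback; that plumbing is
 * elided here. The source data must be DMA-able (not on the stack).
 */
#if 0
static int example_cmac_digest(const u8 *key, unsigned int keylen,
                               const u8 *data, unsigned int len, u8 *mac)
{
        struct crypto_ahash *tfm;
        struct ahash_request *req;
        struct scatterlist sg;
        int rc;

        tfm = crypto_alloc_ahash("cmac(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        /* Ends up in ssi_cmac_setkey() above */
        rc = crypto_ahash_setkey(tfm, key, keylen);
        if (rc)
                goto out_free_tfm;

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                rc = -ENOMEM;
                goto out_free_tfm;
        }

        sg_init_one(&sg, data, len);
        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   NULL, NULL);
        ahash_request_set_crypt(req, &sg, mac, len);

        /* Dispatches through ssi_mac_digest() below */
        rc = crypto_ahash_digest(req);

        ahash_request_free(req);
out_free_tfm:
        crypto_free_ahash(tfm);
        return rc;
}
#endif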
1271
1272 static void ssi_hash_free_ctx(struct ssi_hash_ctx *ctx)
1273 {
1274         struct device *dev = drvdata_to_dev(ctx->drvdata);
1275
1276         if (ctx->digest_buff_dma_addr) {
1277                 dma_unmap_single(dev, ctx->digest_buff_dma_addr,
1278                                  sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
1279                 dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
1280                         &ctx->digest_buff_dma_addr);
1281                 ctx->digest_buff_dma_addr = 0;
1282         }
1283         if (ctx->opad_tmp_keys_dma_addr) {
1284                 dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
1285                                  sizeof(ctx->opad_tmp_keys_buff),
1286                                  DMA_BIDIRECTIONAL);
1287                 dev_dbg(dev, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
1288                         &ctx->opad_tmp_keys_dma_addr);
1289                 ctx->opad_tmp_keys_dma_addr = 0;
1290         }
1291
1292         ctx->key_params.keylen = 0;
1293 }
1294
1295 static int ssi_hash_alloc_ctx(struct ssi_hash_ctx *ctx)
1296 {
1297         struct device *dev = drvdata_to_dev(ctx->drvdata);
1298
1299         ctx->key_params.keylen = 0;
1300
1301         ctx->digest_buff_dma_addr =
                     dma_map_single(dev, (void *)ctx->digest_buff,
                                    sizeof(ctx->digest_buff),
                                    DMA_BIDIRECTIONAL);
1302         if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
1303                 dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
1304                         sizeof(ctx->digest_buff), ctx->digest_buff);
1305                 goto fail;
1306         }
1307         dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
1308                 sizeof(ctx->digest_buff), ctx->digest_buff,
1309                 &ctx->digest_buff_dma_addr);
1310
1311         ctx->opad_tmp_keys_dma_addr =
                     dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff,
                                    sizeof(ctx->opad_tmp_keys_buff),
                                    DMA_BIDIRECTIONAL);
1312         if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
1313                 dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
1314                         sizeof(ctx->opad_tmp_keys_buff),
1315                         ctx->opad_tmp_keys_buff);
1316                 goto fail;
1317         }
1318         dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
1319                 sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
1320                 &ctx->opad_tmp_keys_dma_addr);
1321
1322         ctx->is_hmac = false;
1323         return 0;
1324
1325 fail:
1326         ssi_hash_free_ctx(ctx);
1327         return -ENOMEM;
1328 }
1329
1330 static int ssi_ahash_cra_init(struct crypto_tfm *tfm)
1331 {
1332         struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1333         struct hash_alg_common *hash_alg_common =
1334                 container_of(tfm->__crt_alg, struct hash_alg_common, base);
1335         struct ahash_alg *ahash_alg =
1336                 container_of(hash_alg_common, struct ahash_alg, halg);
1337         struct ssi_hash_alg *ssi_alg =
1338                         container_of(ahash_alg, struct ssi_hash_alg, ahash_alg);
1339
1340         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1341                                  sizeof(struct ahash_req_ctx));
1342
1343         ctx->hash_mode = ssi_alg->hash_mode;
1344         ctx->hw_mode = ssi_alg->hw_mode;
1345         ctx->inter_digestsize = ssi_alg->inter_digestsize;
1346         ctx->drvdata = ssi_alg->drvdata;
1347
1348         return ssi_hash_alloc_ctx(ctx);
1349 }
1350
1351 static void ssi_hash_cra_exit(struct crypto_tfm *tfm)
1352 {
1353         struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1354         struct device *dev = drvdata_to_dev(ctx->drvdata);
1355
1356         dev_dbg(dev, "ssi_hash_cra_exit\n");
1357         ssi_hash_free_ctx(ctx);
1358 }
1359
1360 static int ssi_mac_update(struct ahash_request *req)
1361 {
1362         struct ahash_req_ctx *state = ahash_request_ctx(req);
1363         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1364         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1365         struct device *dev = drvdata_to_dev(ctx->drvdata);
1366         unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
1367         struct ssi_crypto_req ssi_req = {};
1368         struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
1369         int rc;
1370         u32 idx = 0;
1371
1372         if (req->nbytes == 0) {
1373                 /* no real updates required */
1374                 return 0;
1375         }
1376
1377         state->xcbc_count++;
1378
1379         rc = cc_map_hash_request_update(ctx->drvdata, state, req->src,
1380                                         req->nbytes, block_size);
1381         if (unlikely(rc)) {
1382                 if (rc == 1) {
1383                         dev_dbg(dev, "data size does not require HW update %x\n",
1384                                 req->nbytes);
1385                         /* No hardware updates are required */
1386                         return 0;
1387                 }
1388                 dev_err(dev, "map_ahash_request_update() failed\n");
1389                 return -ENOMEM;
1390         }
1391
1392         if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
1393                 ssi_hash_create_xcbc_setup(req, desc, &idx);
1394         else
1395                 ssi_hash_create_cmac_setup(req, desc, &idx);
1396
1397         ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);
1398
1399         /* store the hash digest result in context */
1400         hw_desc_init(&desc[idx]);
1401         set_cipher_mode(&desc[idx], ctx->hw_mode);
1402         set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
1403                       ctx->inter_digestsize, NS_BIT, 1);
1404         set_queue_last_ind(&desc[idx]);
1405         set_flow_mode(&desc[idx], S_AES_to_DOUT);
1406         set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1407         idx++;
1408
1409         /* Setup DX request structure */
1410         ssi_req.user_cb = (void *)ssi_hash_update_complete;
1411         ssi_req.user_arg = (void *)req;
1412
1413         rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
1414         if (unlikely(rc != -EINPROGRESS)) {
1415                 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1416                 cc_unmap_hash_request(dev, state, req->src, true);
1417         }
1418         return rc;
1419 }
1420
1421 static int ssi_mac_final(struct ahash_request *req)
1422 {
1423         struct ahash_req_ctx *state = ahash_request_ctx(req);
1424         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1425         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1426         struct device *dev = drvdata_to_dev(ctx->drvdata);
1427         struct ssi_crypto_req ssi_req = {};
1428         struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
1429         int idx = 0;
1430         int rc = 0;
1431         u32 key_size, key_len;
1432         u32 digestsize = crypto_ahash_digestsize(tfm);
1433
1434         u32 rem_cnt = state->buff_index ? state->buff1_cnt :
1435                         state->buff0_cnt;
1436
1437         if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1438                 key_size = CC_AES_128_BIT_KEY_SIZE;
1439                 key_len  = CC_AES_128_BIT_KEY_SIZE;
1440         } else {
1441                 key_size = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
1442                         ctx->key_params.keylen;
1443                 key_len =  ctx->key_params.keylen;
1444         }
1445
1446         dev_dbg(dev, "===== final xcbc remainder (%d) ====\n", rem_cnt);
1447
1448         if (unlikely(cc_map_hash_request_final(ctx->drvdata, state, req->src,
1449                                                req->nbytes, 0))) {
1450                 dev_err(dev, "map_ahash_request_final() failed\n");
1451                 return -ENOMEM;
1452         }
1453
1454         if (unlikely(ssi_hash_map_result(dev, state, digestsize))) {
1455                 dev_err(dev, "map_ahash_digest() failed\n");
                     cc_unmap_hash_request(dev, state, req->src, true);
1456                 return -ENOMEM;
1457         }
1458
1459         /* Setup DX request structure */
1460         ssi_req.user_cb = (void *)ssi_hash_complete;
1461         ssi_req.user_arg = (void *)req;
1462
1463         if (state->xcbc_count && rem_cnt == 0) {
1464                 /* Load key for ECB decryption */
1465                 hw_desc_init(&desc[idx]);
1466                 set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1467                 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
1468                 set_din_type(&desc[idx], DMA_DLLI,
1469                              (ctx->opad_tmp_keys_dma_addr +
1470                               XCBC_MAC_K1_OFFSET), key_size, NS_BIT);
1471                 set_key_size_aes(&desc[idx], key_len);
1472                 set_flow_mode(&desc[idx], S_DIN_to_AES);
1473                 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1474                 idx++;
1475
1476                 /*
                      * Decrypt the MAC state back to (previous state XOR M[n])
                      * so the final full block can be reprocessed below with
                      * the final-block tweak applied.
                      */
1477                 hw_desc_init(&desc[idx]);
1478                 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
1479                              CC_AES_BLOCK_SIZE, NS_BIT);
1480                 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
1481                               CC_AES_BLOCK_SIZE, NS_BIT, 0);
1482                 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1483                 idx++;
1484
1485                 /* Memory Barrier: wait for axi write to complete */
1486                 hw_desc_init(&desc[idx]);
1487                 set_din_no_dma(&desc[idx], 0, 0xfffff0);
1488                 set_dout_no_dma(&desc[idx], 0, 0, 1);
1489                 idx++;
1490         }
1491
1492         if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
1493                 ssi_hash_create_xcbc_setup(req, desc, &idx);
1494         else
1495                 ssi_hash_create_cmac_setup(req, desc, &idx);
1496
1497         if (state->xcbc_count == 0) {
1498                 hw_desc_init(&desc[idx]);
1499                 set_cipher_mode(&desc[idx], ctx->hw_mode);
1500                 set_key_size_aes(&desc[idx], key_len);
1501                 set_cmac_size0_mode(&desc[idx]);
1502                 set_flow_mode(&desc[idx], S_DIN_to_AES);
1503                 idx++;
1504         } else if (rem_cnt > 0) {
1505                 ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT,
                                               desc, false, &idx);
1506         } else {
1507                 hw_desc_init(&desc[idx]);
1508                 set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
1509                 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1510                 idx++;
1511         }
1512
1513         /* Get final MAC result */
1514         hw_desc_init(&desc[idx]);
1515         /* TODO */
1516         set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1517                       digestsize, NS_BIT, 1);
1518         set_queue_last_ind(&desc[idx]);
1519         set_flow_mode(&desc[idx], S_AES_to_DOUT);
1520         set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1521         set_cipher_mode(&desc[idx], ctx->hw_mode);
1522         idx++;
1523
1524         rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
1525         if (unlikely(rc != -EINPROGRESS)) {
1526                 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1527                 cc_unmap_hash_request(dev, state, req->src, true);
1528                 ssi_hash_unmap_result(dev, state, digestsize, req->result);
1529         }
1530         return rc;
1531 }
1532
1533 static int ssi_mac_finup(struct ahash_request *req)
1534 {
1535         struct ahash_req_ctx *state = ahash_request_ctx(req);
1536         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1537         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1538         struct device *dev = drvdata_to_dev(ctx->drvdata);
1539         struct ssi_crypto_req ssi_req = {};
1540         struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
1541         int idx = 0;
1542         int rc = 0;
1543         u32 key_len = 0;
1544         u32 digestsize = crypto_ahash_digestsize(tfm);
1545
1546         dev_dbg(dev, "===== finup xcbc(%d) ====\n", req->nbytes);
1547         if (state->xcbc_count > 0 && req->nbytes == 0) {
1548                 dev_dbg(dev, "No data to update. Calling ssi_mac_final\n");
1549                 return ssi_mac_final(req);
1550         }
1551
1552         if (unlikely(cc_map_hash_request_final(ctx->drvdata, state, req->src,
1553                                                req->nbytes, 1))) {
1554                 dev_err(dev, "map_ahash_request_final() failed\n");
1555                 return -ENOMEM;
1556         }
1557         if (unlikely(ssi_hash_map_result(dev, state, digestsize))) {
1558                 dev_err(dev, "map_ahash_digest() failed\n");
                     cc_unmap_hash_request(dev, state, req->src, true);
1559                 return -ENOMEM;
1560         }
1561
1562         /* Setup DX request structure */
1563         ssi_req.user_cb = (void *)ssi_hash_complete;
1564         ssi_req.user_arg = (void *)req;
1565
1566         if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1567                 key_len = CC_AES_128_BIT_KEY_SIZE;
1568                 ssi_hash_create_xcbc_setup(req, desc, &idx);
1569         } else {
1570                 key_len = ctx->key_params.keylen;
1571                 ssi_hash_create_cmac_setup(req, desc, &idx);
1572         }
1573
1574         if (req->nbytes == 0) {
1575                 hw_desc_init(&desc[idx]);
1576                 set_cipher_mode(&desc[idx], ctx->hw_mode);
1577                 set_key_size_aes(&desc[idx], key_len);
1578                 set_cmac_size0_mode(&desc[idx]);
1579                 set_flow_mode(&desc[idx], S_DIN_to_AES);
1580                 idx++;
1581         } else {
1582                 ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT,
                                               desc, false, &idx);
1583         }
1584
1585         /* Get final MAC result */
1586         hw_desc_init(&desc[idx]);
1587         /* TODO */
1588         set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1589                       digestsize, NS_BIT, 1);
1590         set_queue_last_ind(&desc[idx]);
1591         set_flow_mode(&desc[idx], S_AES_to_DOUT);
1592         set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1593         set_cipher_mode(&desc[idx], ctx->hw_mode);
1594         idx++;
1595
1596         rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
1597         if (unlikely(rc != -EINPROGRESS)) {
1598                 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1599                 cc_unmap_hash_request(dev, state, req->src, true);
1600                 ssi_hash_unmap_result(dev, state, digestsize, req->result);
1601         }
1602         return rc;
1603 }
1604
1605 static int ssi_mac_digest(struct ahash_request *req)
1606 {
1607         struct ahash_req_ctx *state = ahash_request_ctx(req);
1608         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1609         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1610         struct device *dev = drvdata_to_dev(ctx->drvdata);
1611         u32 digestsize = crypto_ahash_digestsize(tfm);
1612         struct ssi_crypto_req ssi_req = {};
1613         struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
1614         u32 key_len;
1615         int idx = 0;
1616         int rc;
1617
1618         dev_dbg(dev, "===== digest mac (%d) ====\n", req->nbytes);
1619
1620         if (unlikely(ssi_hash_map_request(dev, state, ctx))) {
1621                 dev_err(dev, "map_ahash_source() failed\n");
1622                 return -ENOMEM;
1623         }
1624         if (unlikely(ssi_hash_map_result(dev, state, digestsize))) {
1625                 dev_err(dev, "map_ahash_digest() failed\n");
                     ssi_hash_unmap_request(dev, state, ctx);
1626                 return -ENOMEM;
1627         }
1628
1629         if (unlikely(cc_map_hash_request_final(ctx->drvdata, state, req->src,
1630                                                req->nbytes, 1))) {
1631                 dev_err(dev, "map_ahash_request_final() failed\n");
                     ssi_hash_unmap_result(dev, state, digestsize, req->result);
                     ssi_hash_unmap_request(dev, state, ctx);
1632                 return -ENOMEM;
1633         }
1634
1635         /* Setup DX request structure */
1636         ssi_req.user_cb = (void *)ssi_hash_digest_complete;
1637         ssi_req.user_arg = (void *)req;
1638
1639         if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1640                 key_len = CC_AES_128_BIT_KEY_SIZE;
1641                 ssi_hash_create_xcbc_setup(req, desc, &idx);
1642         } else {
1643                 key_len = ctx->key_params.keylen;
1644                 ssi_hash_create_cmac_setup(req, desc, &idx);
1645         }
1646
1647         if (req->nbytes == 0) {
1648                 hw_desc_init(&desc[idx]);
1649                 set_cipher_mode(&desc[idx], ctx->hw_mode);
1650                 set_key_size_aes(&desc[idx], key_len);
1651                 set_cmac_size0_mode(&desc[idx]);
1652                 set_flow_mode(&desc[idx], S_DIN_to_AES);
1653                 idx++;
1654         } else {
1655                 ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT,
                                               desc, false, &idx);
1656         }
1657
1658         /* Get final MAC result */
1659         hw_desc_init(&desc[idx]);
1660         set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1661                       CC_AES_BLOCK_SIZE, NS_BIT, 1);
1662         set_queue_last_ind(&desc[idx]);
1663         set_flow_mode(&desc[idx], S_AES_to_DOUT);
1664         set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1665         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1666         set_cipher_mode(&desc[idx], ctx->hw_mode);
1667         idx++;
1668
1669         rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
1670         if (unlikely(rc != -EINPROGRESS)) {
1671                 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1672                 cc_unmap_hash_request(dev, state, req->src, true);
1673                 ssi_hash_unmap_result(dev, state, digestsize, req->result);
1674                 ssi_hash_unmap_request(dev, state, ctx);
1675         }
1676         return rc;
1677 }
1678
1679 /* ahash wrapper functions */
1680 static int ssi_ahash_digest(struct ahash_request *req)
1681 {
1682         struct ahash_req_ctx *state = ahash_request_ctx(req);
1683         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1684         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1685         u32 digestsize = crypto_ahash_digestsize(tfm);
1686
1687         return ssi_hash_digest(state, ctx, digestsize, req->src,
                                    req->nbytes, req->result, (void *)req);
1688 }
1689
1690 static int ssi_ahash_update(struct ahash_request *req)
1691 {
1692         struct ahash_req_ctx *state = ahash_request_ctx(req);
1693         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1694         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1695         unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
1696
1697         return ssi_hash_update(state, ctx, block_size, req->src,
                                    req->nbytes, (void *)req);
1698 }
1699
1700 static int ssi_ahash_finup(struct ahash_request *req)
1701 {
1702         struct ahash_req_ctx *state = ahash_request_ctx(req);
1703         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1704         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1705         u32 digestsize = crypto_ahash_digestsize(tfm);
1706
1707         return ssi_hash_finup(state, ctx, digestsize, req->src,
                                   req->nbytes, req->result, (void *)req);
1708 }
1709
1710 static int ssi_ahash_final(struct ahash_request *req)
1711 {
1712         struct ahash_req_ctx *state = ahash_request_ctx(req);
1713         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1714         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1715         u32 digestsize = crypto_ahash_digestsize(tfm);
1716
1717         return ssi_hash_final(state, ctx, digestsize, req->src,
                                   req->nbytes, req->result, (void *)req);
1718 }
1719
1720 static int ssi_ahash_init(struct ahash_request *req)
1721 {
1722         struct ahash_req_ctx *state = ahash_request_ctx(req);
1723         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1724         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1725         struct device *dev = drvdata_to_dev(ctx->drvdata);
1726
1727         dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);
1728
1729         return ssi_hash_init(state, ctx);
1730 }
1731
1732 static int ssi_ahash_export(struct ahash_request *req, void *out)
1733 {
1734         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1735         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1736         struct device *dev = drvdata_to_dev(ctx->drvdata);
1737         struct ahash_req_ctx *state = ahash_request_ctx(req);
1738         u8 *curr_buff = state->buff_index ? state->buff1 : state->buff0;
1739         u32 curr_buff_cnt = state->buff_index ? state->buff1_cnt :
1740                                 state->buff0_cnt;
1741         const u32 tmp = CC_EXPORT_MAGIC;
1742
1743         memcpy(out, &tmp, sizeof(u32));
1744         out += sizeof(u32);
1745
1746         dma_sync_single_for_cpu(dev, state->digest_buff_dma_addr,
1747                                 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
1748         memcpy(out, state->digest_buff, ctx->inter_digestsize);
1749         out += ctx->inter_digestsize;
1750
1751         if (state->digest_bytes_len_dma_addr) {
1752                 dma_sync_single_for_cpu(dev, state->digest_bytes_len_dma_addr,
1753                                         HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
1754                 memcpy(out, state->digest_bytes_len, HASH_LEN_SIZE);
1755         } else {
1756                 /* Poison the unused exported digest len field. */
1757                 memset(out, 0x5F, HASH_LEN_SIZE);
1758         }
1759         out += HASH_LEN_SIZE;
1760
1761         memcpy(out, &curr_buff_cnt, sizeof(u32));
1762         out += sizeof(u32);
1763
1764         memcpy(out, curr_buff, curr_buff_cnt);
1765
1766         /* No sync for device is needed since we did not change the data;
1767          * we only copied it.
1768          */
1769
1770         return 0;
1771 }
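
/*
 * Exported state layout, as produced by ssi_ahash_export() above and
 * consumed by ssi_ahash_import() below; it must fit within the
 * CC_STATE_SIZE() value advertised as .statesize:
 *
 *   u32 magic;                        CC_EXPORT_MAGIC
 *   u8  digest[inter_digestsize];     intermediate digest
 *   u8  len[HASH_LEN_SIZE];           hashed-bytes counter (0x5F-poisoned
 *                                     when the mode keeps no length)
 *   u32 buff_cnt;                     valid bytes in buff[]
 *   u8  buff[SSI_MAX_HASH_BLCK_SIZE]; pending partial block
 */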
1772
1773 static int ssi_ahash_import(struct ahash_request *req, const void *in)
1774 {
1775         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1776         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1777         struct device *dev = drvdata_to_dev(ctx->drvdata);
1778         struct ahash_req_ctx *state = ahash_request_ctx(req);
1779         u32 tmp;
1780         int rc;
1781
1782         memcpy(&tmp, in, sizeof(u32));
1783         if (tmp != CC_EXPORT_MAGIC) {
1784                 rc = -EINVAL;
1785                 goto out;
1786         }
1787         in += sizeof(u32);
1788
1789         rc = ssi_hash_init(state, ctx);
1790         if (rc)
1791                 goto out;
1792
1793         dma_sync_single_for_cpu(dev, state->digest_buff_dma_addr,
1794                                 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
1795         memcpy(state->digest_buff, in, ctx->inter_digestsize);
1796         in += ctx->inter_digestsize;
1797
1798         if (state->digest_bytes_len_dma_addr) {
1799                 dma_sync_single_for_cpu(dev, state->digest_bytes_len_dma_addr,
1800                                         HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
1801                 memcpy(state->digest_bytes_len, in, HASH_LEN_SIZE);
1802         }
1803         in += HASH_LEN_SIZE;
1804
1805         dma_sync_single_for_device(dev, state->digest_buff_dma_addr,
1806                                    ctx->inter_digestsize, DMA_BIDIRECTIONAL);
1807
1808         if (state->digest_bytes_len_dma_addr)
1809                 dma_sync_single_for_device(dev,
1810                                            state->digest_bytes_len_dma_addr,
1811                                            HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
1812
1813         state->buff_index = 0;
1814
1815         /* Sanity check the data as much as possible */
1816         memcpy(&tmp, in, sizeof(u32));
1817         if (tmp > SSI_MAX_HASH_BLCK_SIZE) {
1818                 rc = -EINVAL;
1819                 goto out;
1820         }
1821         in += sizeof(u32);
1822
1823         state->buff0_cnt = tmp;
1824         memcpy(state->buff0, in, state->buff0_cnt);
1825
1826 out:
1827         return rc;
1828 }
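
/*
 * Hypothetical round-trip over the export/import format above; a sketch
 * only (the function name is illustrative, and an async tfm such as this
 * driver's may complete with -EINPROGRESS, which is not handled here).
 */
#if 0
static int example_split_hash(struct ahash_request *req,
                              struct scatterlist *part1, unsigned int len1,
                              struct scatterlist *part2, unsigned int len2,
                              u8 *digest)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        u8 *state;
        int rc;

        state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
        if (!state)
                return -ENOMEM;

        ahash_request_set_crypt(req, part1, NULL, len1);
        rc = crypto_ahash_init(req);
        if (!rc)
                rc = crypto_ahash_update(req);
        if (!rc)
                rc = crypto_ahash_export(req, state); /* snapshot state */
        if (!rc)
                rc = crypto_ahash_import(req, state); /* resume from it */
        if (!rc) {
                ahash_request_set_crypt(req, part2, digest, len2);
                rc = crypto_ahash_finup(req);
        }

        kfree(state);
        return rc;
}
#endif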
1829
1830 static int ssi_ahash_setkey(struct crypto_ahash *ahash,
1831                             const u8 *key, unsigned int keylen)
1832 {
1833         return ssi_hash_setkey((void *)ahash, key, keylen, false);
1834 }
1835
1836 struct ssi_hash_template {
1837         char name[CRYPTO_MAX_ALG_NAME];
1838         char driver_name[CRYPTO_MAX_ALG_NAME];
1839         char mac_name[CRYPTO_MAX_ALG_NAME];
1840         char mac_driver_name[CRYPTO_MAX_ALG_NAME];
1841         unsigned int blocksize;
1842         bool synchronize;
1843         struct ahash_alg template_ahash;
1844         int hash_mode;
1845         int hw_mode;
1846         int inter_digestsize;
1847         struct ssi_drvdata *drvdata;
1848 };
1849
1850 #define CC_STATE_SIZE(_x) \
1851         ((_x) + HASH_LEN_SIZE + SSI_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
1852
1853 /* hash descriptors */
1854 static struct ssi_hash_template driver_hash[] = {
1855         /* Asynchronous ahash templates */
1856         {
1857                 .name = "sha1",
1858                 .driver_name = "sha1-dx",
1859                 .mac_name = "hmac(sha1)",
1860                 .mac_driver_name = "hmac-sha1-dx",
1861                 .blocksize = SHA1_BLOCK_SIZE,
1862                 .synchronize = false,
1863                 .template_ahash = {
1864                         .init = ssi_ahash_init,
1865                         .update = ssi_ahash_update,
1866                         .final = ssi_ahash_final,
1867                         .finup = ssi_ahash_finup,
1868                         .digest = ssi_ahash_digest,
1869                         .export = ssi_ahash_export,
1870                         .import = ssi_ahash_import,
1871                         .setkey = ssi_ahash_setkey,
1872                         .halg = {
1873                                 .digestsize = SHA1_DIGEST_SIZE,
1874                                 .statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
1875                         },
1876                 },
1877                 .hash_mode = DRV_HASH_SHA1,
1878                 .hw_mode = DRV_HASH_HW_SHA1,
1879                 .inter_digestsize = SHA1_DIGEST_SIZE,
1880         },
1881         {
1882                 .name = "sha256",
1883                 .driver_name = "sha256-dx",
1884                 .mac_name = "hmac(sha256)",
1885                 .mac_driver_name = "hmac-sha256-dx",
1886                 .blocksize = SHA256_BLOCK_SIZE,
1887                 .template_ahash = {
1888                         .init = ssi_ahash_init,
1889                         .update = ssi_ahash_update,
1890                         .final = ssi_ahash_final,
1891                         .finup = ssi_ahash_finup,
1892                         .digest = ssi_ahash_digest,
1893                         .export = ssi_ahash_export,
1894                         .import = ssi_ahash_import,
1895                         .setkey = ssi_ahash_setkey,
1896                         .halg = {
1897                                 .digestsize = SHA256_DIGEST_SIZE,
1898                                 .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE)
1899                         },
1900                 },
1901                 .hash_mode = DRV_HASH_SHA256,
1902                 .hw_mode = DRV_HASH_HW_SHA256,
1903                 .inter_digestsize = SHA256_DIGEST_SIZE,
1904         },
1905         {
1906                 .name = "sha224",
1907                 .driver_name = "sha224-dx",
1908                 .mac_name = "hmac(sha224)",
1909                 .mac_driver_name = "hmac-sha224-dx",
1910                 .blocksize = SHA224_BLOCK_SIZE,
1911                 .template_ahash = {
1912                         .init = ssi_ahash_init,
1913                         .update = ssi_ahash_update,
1914                         .final = ssi_ahash_final,
1915                         .finup = ssi_ahash_finup,
1916                         .digest = ssi_ahash_digest,
1917                         .export = ssi_ahash_export,
1918                         .import = ssi_ahash_import,
1919                         .setkey = ssi_ahash_setkey,
1920                         .halg = {
1921                                 .digestsize = SHA224_DIGEST_SIZE,
1922                                 .statesize = CC_STATE_SIZE(SHA224_DIGEST_SIZE),
1923                         },
1924                 },
1925                 .hash_mode = DRV_HASH_SHA224,
1926                 .hw_mode = DRV_HASH_HW_SHA256,
1927                 .inter_digestsize = SHA256_DIGEST_SIZE,
1928         },
1929 #if (DX_DEV_SHA_MAX > 256)
1930         {
1931                 .name = "sha384",
1932                 .driver_name = "sha384-dx",
1933                 .mac_name = "hmac(sha384)",
1934                 .mac_driver_name = "hmac-sha384-dx",
1935                 .blocksize = SHA384_BLOCK_SIZE,
1936                 .template_ahash = {
1937                         .init = ssi_ahash_init,
1938                         .update = ssi_ahash_update,
1939                         .final = ssi_ahash_final,
1940                         .finup = ssi_ahash_finup,
1941                         .digest = ssi_ahash_digest,
1942                         .export = ssi_ahash_export,
1943                         .import = ssi_ahash_import,
1944                         .setkey = ssi_ahash_setkey,
1945                         .halg = {
1946                                 .digestsize = SHA384_DIGEST_SIZE,
1947                                 .statesize = CC_STATE_SIZE(SHA384_DIGEST_SIZE),
1948                         },
1949                 },
1950                 .hash_mode = DRV_HASH_SHA384,
1951                 .hw_mode = DRV_HASH_HW_SHA512,
1952                 .inter_digestsize = SHA512_DIGEST_SIZE,
1953         },
1954         {
1955                 .name = "sha512",
1956                 .driver_name = "sha512-dx",
1957                 .mac_name = "hmac(sha512)",
1958                 .mac_driver_name = "hmac-sha512-dx",
1959                 .blocksize = SHA512_BLOCK_SIZE,
1960                 .template_ahash = {
1961                         .init = ssi_ahash_init,
1962                         .update = ssi_ahash_update,
1963                         .final = ssi_ahash_final,
1964                         .finup = ssi_ahash_finup,
1965                         .digest = ssi_ahash_digest,
1966                         .export = ssi_ahash_export,
1967                         .import = ssi_ahash_import,
1968                         .setkey = ssi_ahash_setkey,
1969                         .halg = {
1970                                 .digestsize = SHA512_DIGEST_SIZE,
1971                                 .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
1972                         },
1973                 },
1974                 .hash_mode = DRV_HASH_SHA512,
1975                 .hw_mode = DRV_HASH_HW_SHA512,
1976                 .inter_digestsize = SHA512_DIGEST_SIZE,
1977         },
1978 #endif
1979         {
1980                 .name = "md5",
1981                 .driver_name = "md5-dx",
1982                 .mac_name = "hmac(md5)",
1983                 .mac_driver_name = "hmac-md5-dx",
1984                 .blocksize = MD5_HMAC_BLOCK_SIZE,
1985                 .template_ahash = {
1986                         .init = ssi_ahash_init,
1987                         .update = ssi_ahash_update,
1988                         .final = ssi_ahash_final,
1989                         .finup = ssi_ahash_finup,
1990                         .digest = ssi_ahash_digest,
1991                         .export = ssi_ahash_export,
1992                         .import = ssi_ahash_import,
1993                         .setkey = ssi_ahash_setkey,
1994                         .halg = {
1995                                 .digestsize = MD5_DIGEST_SIZE,
1996                                 .statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
1997                         },
1998                 },
1999                 .hash_mode = DRV_HASH_MD5,
2000                 .hw_mode = DRV_HASH_HW_MD5,
2001                 .inter_digestsize = MD5_DIGEST_SIZE,
2002         },
2003         {
2004                 .mac_name = "xcbc(aes)",
2005                 .mac_driver_name = "xcbc-aes-dx",
2006                 .blocksize = AES_BLOCK_SIZE,
2007                 .template_ahash = {
2008                         .init = ssi_ahash_init,
2009                         .update = ssi_mac_update,
2010                         .final = ssi_mac_final,
2011                         .finup = ssi_mac_finup,
2012                         .digest = ssi_mac_digest,
2013                         .setkey = ssi_xcbc_setkey,
2014                         .export = ssi_ahash_export,
2015                         .import = ssi_ahash_import,
2016                         .halg = {
2017                                 .digestsize = AES_BLOCK_SIZE,
2018                                 .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
2019                         },
2020                 },
2021                 .hash_mode = DRV_HASH_NULL,
2022                 .hw_mode = DRV_CIPHER_XCBC_MAC,
2023                 .inter_digestsize = AES_BLOCK_SIZE,
2024         },
2025 #if SSI_CC_HAS_CMAC
2026         {
2027                 .mac_name = "cmac(aes)",
2028                 .mac_driver_name = "cmac-aes-dx",
2029                 .blocksize = AES_BLOCK_SIZE,
2030                 .template_ahash = {
2031                         .init = ssi_ahash_init,
2032                         .update = ssi_mac_update,
2033                         .final = ssi_mac_final,
2034                         .finup = ssi_mac_finup,
2035                         .digest = ssi_mac_digest,
2036                         .setkey = ssi_cmac_setkey,
2037                         .export = ssi_ahash_export,
2038                         .import = ssi_ahash_import,
2039                         .halg = {
2040                                 .digestsize = AES_BLOCK_SIZE,
2041                                 .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
2042                         },
2043                 },
2044                 .hash_mode = DRV_HASH_NULL,
2045                 .hw_mode = DRV_CIPHER_CMAC,
2046                 .inter_digestsize = AES_BLOCK_SIZE,
2047         },
2048 #endif
2050 };
2051
2052 static struct ssi_hash_alg *
2053 ssi_hash_create_alg(struct ssi_hash_template *template, struct device *dev,
2054                     bool keyed)
2055 {
2056         struct ssi_hash_alg *t_crypto_alg;
2057         struct crypto_alg *alg;
2058         struct ahash_alg *halg;
2059
2060         t_crypto_alg = kzalloc(sizeof(*t_crypto_alg), GFP_KERNEL);
2061         if (!t_crypto_alg)
2062                 return ERR_PTR(-ENOMEM);
2063
2064         t_crypto_alg->ahash_alg = template->template_ahash;
2065         halg = &t_crypto_alg->ahash_alg;
2066         alg = &halg->halg.base;
2067
2068         if (keyed) {
2069                 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
2070                          template->mac_name);
2071                 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2072                          template->mac_driver_name);
2073         } else {
2074                 halg->setkey = NULL;
2075                 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
2076                          template->name);
2077                 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2078                          template->driver_name);
2079         }
2080         alg->cra_module = THIS_MODULE;
2081         alg->cra_ctxsize = sizeof(struct ssi_hash_ctx);
2082         alg->cra_priority = SSI_CRA_PRIO;
2083         alg->cra_blocksize = template->blocksize;
2084         alg->cra_alignmask = 0;
2085         alg->cra_exit = ssi_hash_cra_exit;
2086
2087         alg->cra_init = ssi_ahash_cra_init;
2088         alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH |
2089                         CRYPTO_ALG_KERN_DRIVER_ONLY;
2090         alg->cra_type = &crypto_ahash_type;
2091
2092         t_crypto_alg->hash_mode = template->hash_mode;
2093         t_crypto_alg->hw_mode = template->hw_mode;
2094         t_crypto_alg->inter_digestsize = template->inter_digestsize;
2095
2096         return t_crypto_alg;
2097 }
2098
2099 int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
2100 {
2101         struct ssi_hash_handle *hash_handle = drvdata->hash_handle;
2102         ssi_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
2103         unsigned int larval_seq_len = 0;
2104         struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
2105         struct device *dev = drvdata_to_dev(drvdata);
2106         int rc = 0;
2107 #if (DX_DEV_SHA_MAX > 256)
2108         int i;
2109 #endif
2110
2111         /* Copy-to-sram digest-len */
2112         cc_set_sram_desc(digest_len_init, sram_buff_ofs,
2113                          ARRAY_SIZE(digest_len_init), larval_seq,
2114                          &larval_seq_len);
2115         rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2116         if (unlikely(rc))
2117                 goto init_digest_const_err;
2118
2119         sram_buff_ofs += sizeof(digest_len_init);
2120         larval_seq_len = 0;
2121
2122 #if (DX_DEV_SHA_MAX > 256)
2123         /* Copy-to-sram digest-len for sha384/512 */
2124         cc_set_sram_desc(digest_len_sha512_init, sram_buff_ofs,
2125                          ARRAY_SIZE(digest_len_sha512_init),
2126                          larval_seq, &larval_seq_len);
2127         rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2128         if (unlikely(rc))
2129                 goto init_digest_const_err;
2130
2131         sram_buff_ofs += sizeof(digest_len_sha512_init);
2132         larval_seq_len = 0;
2133 #endif
2134
2135         /* The initial digests offset */
2136         hash_handle->larval_digest_sram_addr = sram_buff_ofs;
2137
2138         /* Copy-to-sram initial SHA* digests */
2139         cc_set_sram_desc(md5_init, sram_buff_ofs,
2140                          ARRAY_SIZE(md5_init), larval_seq,
2141                          &larval_seq_len);
2142         rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2143         if (unlikely(rc))
2144                 goto init_digest_const_err;
2145         sram_buff_ofs += sizeof(md5_init);
2146         larval_seq_len = 0;
2147
2148         cc_set_sram_desc(sha1_init, sram_buff_ofs,
2149                          ARRAY_SIZE(sha1_init), larval_seq,
2150                          &larval_seq_len);
2151         rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2152         if (unlikely(rc))
2153                 goto init_digest_const_err;
2154         sram_buff_ofs += sizeof(sha1_init);
2155         larval_seq_len = 0;
2156
2157         cc_set_sram_desc(sha224_init, sram_buff_ofs,
2158                          ARRAY_SIZE(sha224_init), larval_seq,
2159                          &larval_seq_len);
2160         rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2161         if (unlikely(rc))
2162                 goto init_digest_const_err;
2163         sram_buff_ofs += sizeof(sha224_init);
2164         larval_seq_len = 0;
2165
2166         cc_set_sram_desc(sha256_init, sram_buff_ofs,
2167                          ARRAY_SIZE(sha256_init), larval_seq,
2168                          &larval_seq_len);
2169         rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2170         if (unlikely(rc))
2171                 goto init_digest_const_err;
2172         sram_buff_ofs += sizeof(sha256_init);
2173         larval_seq_len = 0;
2174
2175 #if (DX_DEV_SHA_MAX > 256)
2176         /*
              * Each 64-bit larval digest word must be word-swapped before
              * it is copied to SRAM.
              */
2177         for (i = 0; i < ARRAY_SIZE(sha384_init); i++) {
2178                 const u32 const0 = ((u32 *)&sha384_init[i])[1];
2179                 const u32 const1 = ((u32 *)&sha384_init[i])[0];
2180
2181                 cc_set_sram_desc(&const0, sram_buff_ofs, 1, larval_seq,
2182                                  &larval_seq_len);
2183                 sram_buff_ofs += sizeof(u32);
2184                 cc_set_sram_desc(&const1, sram_buff_ofs, 1, larval_seq,
2185                                  &larval_seq_len);
2186                 sram_buff_ofs += sizeof(u32);
2187         }
2188         rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2189         if (unlikely(rc)) {
2190                 dev_err(dev, "send_request() failed (rc = %d)\n", rc);
2191                 goto init_digest_const_err;
2192         }
2193         larval_seq_len = 0;
2194
2195         for (i = 0; i < ARRAY_SIZE(sha512_init); i++) {
2196                 const u32 const0 = ((u32 *)&sha512_init[i])[1];
2197                 const u32 const1 = ((u32 *)&sha512_init[i])[0];
2198
2199                 cc_set_sram_desc(&const0, sram_buff_ofs, 1, larval_seq,
2200                                  &larval_seq_len);
2201                 sram_buff_ofs += sizeof(u32);
2202                 cc_set_sram_desc(&const1, sram_buff_ofs, 1, larval_seq,
2203                                  &larval_seq_len);
2204                 sram_buff_ofs += sizeof(u32);
2205         }
2206         rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2207         if (unlikely(rc)) {
2208                 dev_err(dev, "send_request() failed (rc = %d)\n", rc);
2209                 goto init_digest_const_err;
2210         }
2211 #endif
2212
2213 init_digest_const_err:
2214         return rc;
2215 }
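
/*
 * SRAM layout laid down above (offsets from digest_len_sram_addr):
 *
 *   digest_len_init               initial byte-count words (<= 256-bit hashes)
 *   [digest_len_sha512_init]      ditto for SHA-384/512, when built
 *   md5_init                      <-- larval_digest_sram_addr points here
 *   sha1_init
 *   sha224_init
 *   sha256_init
 *   [sha384_init, sha512_init]    word-swapped, when built
 */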
2216
2217 int ssi_hash_alloc(struct ssi_drvdata *drvdata)
2218 {
2219         struct ssi_hash_handle *hash_handle;
2220         ssi_sram_addr_t sram_buff;
2221         u32 sram_size_to_alloc;
2222         struct device *dev = drvdata_to_dev(drvdata);
2223         int rc = 0;
2224         int alg;
2225
2226         hash_handle = kzalloc(sizeof(*hash_handle), GFP_KERNEL);
2227         if (!hash_handle)
2228                 return -ENOMEM;
2229
2230         INIT_LIST_HEAD(&hash_handle->hash_list);
2231         drvdata->hash_handle = hash_handle;
2232
2233         sram_size_to_alloc = sizeof(digest_len_init) +
2234 #if (DX_DEV_SHA_MAX > 256)
2235                         sizeof(digest_len_sha512_init) +
2236                         sizeof(sha384_init) +
2237                         sizeof(sha512_init) +
2238 #endif
2239                         sizeof(md5_init) +
2240                         sizeof(sha1_init) +
2241                         sizeof(sha224_init) +
2242                         sizeof(sha256_init);
2243
2244         sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
2245         if (sram_buff == NULL_SRAM_ADDR) {
2246                 dev_err(dev, "SRAM pool exhausted\n");
2247                 rc = -ENOMEM;
2248                 goto fail;
2249         }
2250
2251         /* The initial digest-len offset */
2252         hash_handle->digest_len_sram_addr = sram_buff;
2253
2254         /* Must be set before algorithm registration, as it is used there */
2255         rc = ssi_hash_init_sram_digest_consts(drvdata);
2256         if (unlikely(rc)) {
2257                 dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
2258                 goto fail;
2259         }
2260
2261         /* ahash registration */
2262         for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
2263                 struct ssi_hash_alg *t_alg;
2264                 int hw_mode = driver_hash[alg].hw_mode;
2265
2266                 /* register hmac version */
2267                 t_alg = ssi_hash_create_alg(&driver_hash[alg], dev, true);
2268                 if (IS_ERR(t_alg)) {
2269                         rc = PTR_ERR(t_alg);
2270                         dev_err(dev, "%s alg allocation failed\n",
2271                                 driver_hash[alg].driver_name);
2272                         goto fail;
2273                 }
2274                 t_alg->drvdata = drvdata;
2275
2276                 rc = crypto_register_ahash(&t_alg->ahash_alg);
2277                 if (unlikely(rc)) {
2278                         dev_err(dev, "%s alg registration failed\n",
2279                                 driver_hash[alg].driver_name);
2280                         kfree(t_alg);
2281                         goto fail;
2282                 }
2283                 list_add_tail(&t_alg->entry, &hash_handle->hash_list);
2286
2287                 if (hw_mode == DRV_CIPHER_XCBC_MAC ||
2288                     hw_mode == DRV_CIPHER_CMAC)
2289                         continue;
2290
2291                 /* register hash version */
2292                 t_alg = ssi_hash_create_alg(&driver_hash[alg], dev, false);
2293                 if (IS_ERR(t_alg)) {
2294                         rc = PTR_ERR(t_alg);
2295                         dev_err(dev, "%s alg allocation failed\n",
2296                                 driver_hash[alg].driver_name);
2297                         goto fail;
2298                 }
2299                 t_alg->drvdata = drvdata;
2300
2301                 rc = crypto_register_ahash(&t_alg->ahash_alg);
2302                 if (unlikely(rc)) {
2303                         dev_err(dev, "%s alg registration failed\n",
2304                                 driver_hash[alg].driver_name);
2305                         kfree(t_alg);
2306                         goto fail;
2307                 }
2308                 list_add_tail(&t_alg->entry, &hash_handle->hash_list);
2310         }
2311
2312         return 0;
2313
2314 fail:
2315         kfree(drvdata->hash_handle);
2316         drvdata->hash_handle = NULL;
2317         return rc;
2318 }
2319
2320 int ssi_hash_free(struct ssi_drvdata *drvdata)
2321 {
2322         struct ssi_hash_alg *t_hash_alg, *hash_n;
2323         struct ssi_hash_handle *hash_handle = drvdata->hash_handle;
2324
2325         if (hash_handle) {
2326                 list_for_each_entry_safe(t_hash_alg, hash_n,
                                              &hash_handle->hash_list, entry) {
2327                         crypto_unregister_ahash(&t_hash_alg->ahash_alg);
2328                         list_del(&t_hash_alg->entry);
2329                         kfree(t_hash_alg);
2330                 }
2331
2332                 kfree(hash_handle);
2333                 drvdata->hash_handle = NULL;
2334         }
2335         return 0;
2336 }
2337
2338 static void ssi_hash_create_xcbc_setup(struct ahash_request *areq,
2339                                        struct cc_hw_desc desc[],
2340                                        unsigned int *seq_size)
2341 {
2342         unsigned int idx = *seq_size;
2343         struct ahash_req_ctx *state = ahash_request_ctx(areq);
2344         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2345         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
2346
2347         /* Setup XCBC MAC K1 */
2348         hw_desc_init(&desc[idx]);
2349         set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
2350                                             XCBC_MAC_K1_OFFSET),
2351                      CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2352         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2353         set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2354         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2355         set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2356         set_flow_mode(&desc[idx], S_DIN_to_AES);
2357         idx++;
2358
2359         /* Setup XCBC MAC K2 */
2360         hw_desc_init(&desc[idx]);
2361         set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
2362                                             XCBC_MAC_K2_OFFSET),
2363                      CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2364         set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
2365         set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2366         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2367         set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2368         set_flow_mode(&desc[idx], S_DIN_to_AES);
2369         idx++;
2370
2371         /* Setup XCBC MAC K3 */
2372         hw_desc_init(&desc[idx]);
2373         set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
2374                                             XCBC_MAC_K3_OFFSET),
2375                      CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2376         set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
2377         set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2378         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2379         set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2380         set_flow_mode(&desc[idx], S_DIN_to_AES);
2381         idx++;
2382
2383         /* Loading MAC state */
2384         hw_desc_init(&desc[idx]);
2385         set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2386                      CC_AES_BLOCK_SIZE, NS_BIT);
2387         set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2388         set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2389         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2390         set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2391         set_flow_mode(&desc[idx], S_DIN_to_AES);
2392         idx++;
2393         *seq_size = idx;
2394 }
2395
2396 static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
2397                                        struct cc_hw_desc desc[],
2398                                        unsigned int *seq_size)
2399 {
2400         unsigned int idx = *seq_size;
2401         struct ahash_req_ctx *state = ahash_request_ctx(areq);
2402         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2403         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
2404
2405         /* Setup CMAC Key */
2406         hw_desc_init(&desc[idx]);
2407         set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
2408                      ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
2409                       ctx->key_params.keylen), NS_BIT);
2410         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2411         set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2412         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2413         set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2414         set_flow_mode(&desc[idx], S_DIN_to_AES);
2415         idx++;
2416
2417         /* Load MAC state */
2418         hw_desc_init(&desc[idx]);
2419         set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2420                      CC_AES_BLOCK_SIZE, NS_BIT);
2421         set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2422         set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2423         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2424         set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2425         set_flow_mode(&desc[idx], S_DIN_to_AES);
2426         idx++;
2427         *seq_size = idx;
2428 }
2429
2430 static void ssi_hash_create_data_desc(struct ahash_req_ctx *areq_ctx,
2431                                       struct ssi_hash_ctx *ctx,
2432                                       unsigned int flow_mode,
2433                                       struct cc_hw_desc desc[],
2434                                       bool is_not_last_data,
2435                                       unsigned int *seq_size)
2436 {
2437         unsigned int idx = *seq_size;
2438         struct device *dev = drvdata_to_dev(ctx->drvdata);
2439
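             /*
              * A single contiguous DMA buffer (DLLI) is fed to the engine
              * directly; otherwise the MLLI table is first copied into SRAM
              * over the BYPASS flow and the engine gathers the data from it.
              */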
2440         if (likely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_DLLI)) {
2441                 hw_desc_init(&desc[idx]);
2442                 set_din_type(&desc[idx], DMA_DLLI,
2443                              sg_dma_address(areq_ctx->curr_sg),
2444                              areq_ctx->curr_sg->length, NS_BIT);
2445                 set_flow_mode(&desc[idx], flow_mode);
2446                 idx++;
2447         } else {
2448                 if (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL) {
2449                         dev_dbg(dev, " NULL mode\n");
2450                         /* nothing to build */
2451                         return;
2452                 }
2453                 /* bypass */
2454                 hw_desc_init(&desc[idx]);
2455                 set_din_type(&desc[idx], DMA_DLLI,
2456                              areq_ctx->mlli_params.mlli_dma_addr,
2457                              areq_ctx->mlli_params.mlli_len, NS_BIT);
2458                 set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr,
2459                               areq_ctx->mlli_params.mlli_len);
2460                 set_flow_mode(&desc[idx], BYPASS);
2461                 idx++;
2462                 /* process */
2463                 hw_desc_init(&desc[idx]);
2464                 set_din_type(&desc[idx], DMA_MLLI,
2465                              ctx->drvdata->mlli_sram_addr,
2466                              areq_ctx->mlli_nents, NS_BIT);
2467                 set_flow_mode(&desc[idx], flow_mode);
2468                 idx++;
2469         }
2470         if (is_not_last_data)
2471                 set_din_not_last_indication(&desc[(idx - 1)]);
2472         /* return updated desc sequence size */
2473         *seq_size = idx;
2474 }
2475
2476 /*!
2477  * Gets the address of the initial digest in SRAM
2478  * according to the given hash mode
2479  *
2480  * \param drvdata
2481  * \param mode The hash mode. Supported modes: MD5/SHA1/SHA224/SHA256,
      *             plus SHA384/SHA512 when (DX_DEV_SHA_MAX > 256)
2482  *
2483  * \return u32 The address of the initial digest in SRAM
2484  */
2485 ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, u32 mode)
2486 {
2487         struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
2488         struct ssi_hash_handle *hash_handle = _drvdata->hash_handle;
2489         struct device *dev = drvdata_to_dev(_drvdata);
2490
2491         switch (mode) {
2492         case DRV_HASH_NULL:
2493                 break; /*Ignore*/
2494         case DRV_HASH_MD5:
2495                 return (hash_handle->larval_digest_sram_addr);
2496         case DRV_HASH_SHA1:
2497                 return (hash_handle->larval_digest_sram_addr +
2498                         sizeof(md5_init));
2499         case DRV_HASH_SHA224:
2500                 return (hash_handle->larval_digest_sram_addr +
2501                         sizeof(md5_init) +
2502                         sizeof(sha1_init));
2503         case DRV_HASH_SHA256:
2504                 return (hash_handle->larval_digest_sram_addr +
2505                         sizeof(md5_init) +
2506                         sizeof(sha1_init) +
2507                         sizeof(sha224_init));
2508 #if (DX_DEV_SHA_MAX > 256)
2509         case DRV_HASH_SHA384:
2510                 return (hash_handle->larval_digest_sram_addr +
2511                         sizeof(md5_init) +
2512                         sizeof(sha1_init) +
2513                         sizeof(sha224_init) +
2514                         sizeof(sha256_init));
2515         case DRV_HASH_SHA512:
2516                 return (hash_handle->larval_digest_sram_addr +
2517                         sizeof(md5_init) +
2518                         sizeof(sha1_init) +
2519                         sizeof(sha224_init) +
2520                         sizeof(sha256_init) +
2521                         sizeof(sha384_init));
2522 #endif
2523         default:
2524                 dev_err(dev, "Invalid hash mode (%d)\n", mode);
2525         }
2526
2527         /* Fall back to a valid (if incorrect) address to avoid a kernel crash */
2528         return hash_handle->larval_digest_sram_addr;
2529 }
2530
2531 ssi_sram_addr_t
2532 ssi_ahash_get_initial_digest_len_sram_addr(void *drvdata, u32 mode)
2533 {
2534         struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
2535         struct ssi_hash_handle *hash_handle = _drvdata->hash_handle;
2536         ssi_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;
2537
2538         switch (mode) {
2539         case DRV_HASH_SHA1:
2540         case DRV_HASH_SHA224:
2541         case DRV_HASH_SHA256:
2542         case DRV_HASH_MD5:
2543                 return digest_len_addr;
2544 #if (DX_DEV_SHA_MAX > 256)
2545         case DRV_HASH_SHA384:
2546         case DRV_HASH_SHA512:
2547                 return  digest_len_addr + sizeof(digest_len_init);
2548 #endif
2549         default:
2550                 return digest_len_addr; /* fall back to avoid a kernel crash */
2551         }
2552 }
2553