2 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, see <http://www.gnu.org/licenses/>.
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/platform_device.h>
20 #include <crypto/algapi.h>
21 #include <crypto/hash.h>
22 #include <crypto/sha.h>
23 #include <crypto/md5.h>
24 #include <crypto/internal/hash.h>
26 #include "ssi_config.h"
27 #include "ssi_driver.h"
28 #include "ssi_request_mgr.h"
29 #include "ssi_buffer_mgr.h"
30 #include "ssi_sysfs.h"
32 #include "ssi_sram_mgr.h"
34 #define SSI_MAX_AHASH_SEQ_LEN 12
35 #define SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE SSI_MAX_HASH_BLCK_SIZE
/*
 * Driver-global hash state: addresses of the constant larval digests and
 * digest-length values pre-loaded into CryptoCell SRAM, plus bookkeeping
 * for the registered hash transforms.
 */
37 struct ssi_hash_handle {
38 ssi_sram_addr_t digest_len_sram_addr; /* const value in SRAM*/
39 ssi_sram_addr_t larval_digest_sram_addr; /* const value in SRAM */
40 struct list_head hash_list; /* registered hash alg instances (see entry in struct ssi_hash_alg) */
41 struct completion init_comp; /* NOTE(review): usage not visible in this chunk — presumably signals init done */
/*
 * Initial "current byte count" loaded into the hash engine for the HMAC
 * finalization stage: 0x40 = 64 bytes, i.e. one block (the ipad) has
 * already been consumed.  Stored as four LE 32-bit words.
 */
44 static const u32 digest_len_init[] = {
45 0x00000040, 0x00000000, 0x00000000, 0x00000000 };
/*
 * MD5 larval digest.  The MD5 IV words (RFC 1321) are numerically
 * identical to SHA-1's H0..H3, so the SHA1_H* macros are reused here;
 * the words are listed in reversed order (presumably the word order the
 * HW SRAM expects — same reversed layout as the tables below).
 */
46 static const u32 md5_init[] = {
47 SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
/* SHA-1 larval digest, reversed word order. */
48 static const u32 sha1_init[] = {
49 SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
/* SHA-224 larval digest, reversed word order. */
50 static const u32 sha224_init[] = {
51 SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
52 SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
/* SHA-256 larval digest, reversed word order. */
53 static const u32 sha256_init[] = {
54 SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
55 SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
/* SHA-384/512 support is optional in the HW; gated at compile time. */
56 #if (DX_DEV_SHA_MAX > 256)
/* HMAC finalization count for the 128-byte-block hashes (0x80 = 128). */
57 static const u32 digest_len_sha512_init[] = {
58 0x00000080, 0x00000000, 0x00000000, 0x00000000 };
59 static const u64 sha384_init[] = {
60 SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
61 SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
62 static const u64 sha512_init[] = {
63 SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
64 SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
/* Forward declarations: build the HW descriptor sequences that load the
 * XCBC-MAC / CMAC key material and state before data is streamed in.
 * Both append descriptors to desc[] and advance *seq_size accordingly.
 */
67 static void ssi_hash_create_xcbc_setup(
68 struct ahash_request *areq,
69 struct cc_hw_desc desc[],
70 unsigned int *seq_size);
72 static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
73 struct cc_hw_desc desc[],
74 unsigned int *seq_size);
/* Fields of the per-algorithm wrapper (struct header not visible here). */
77 struct list_head entry; /* link into ssi_hash_handle::hash_list */
81 struct ssi_drvdata *drvdata; /* owning device instance */
82 struct ahash_alg ahash_alg; /* the crypto-API algorithm being registered */
/* DMA mapping of the user-supplied HMAC key while setkey() runs. */
85 struct hash_key_req_ctx {
87 dma_addr_t key_dma_addr; /* key buffer mapped DMA_TO_DEVICE; 0 when unmapped */
/* hash per-session context */
92 struct ssi_drvdata *drvdata;
93 /* holds the origin digest; the digest after "setkey" if HMAC,*
94 * the initial digest if HASH.
96 u8 digest_buff[SSI_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
/* opad-xor-key material derived at setkey time (HMAC second stage) */
97 u8 opad_tmp_keys_buff[SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE] ____cacheline_aligned;
/* DMA addresses of the two buffers above; long-lived ctx mappings */
99 dma_addr_t opad_tmp_keys_dma_addr ____cacheline_aligned;
100 dma_addr_t digest_buff_dma_addr;
101 /* use for hmac with key large then mode block size */
102 struct hash_key_req_ctx key_params;
/* intermediate digest size of the selected hash mode, in bytes */
105 int inter_digestsize;
106 struct completion setkey_comp;
/* Forward declaration: append the data-movement descriptors (DLLI or
 * MLLI, per the mapped request state) feeding source data into the hash
 * engine with the given flow_mode; advances *seq_size.
 */
110 static void ssi_hash_create_data_desc(
111 struct ahash_req_ctx *areq_ctx,
112 struct ssi_hash_ctx *ctx,
113 unsigned int flow_mode, struct cc_hw_desc desc[],
114 bool is_not_last_data,
115 unsigned int *seq_size);
117 static inline void ssi_set_hash_endianity(u32 mode, struct cc_hw_desc *desc)
119 if (unlikely(mode == DRV_HASH_MD5 ||
120 mode == DRV_HASH_SHA384 ||
121 mode == DRV_HASH_SHA512)) {
122 set_bytes_swap(desc, 1);
124 set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
128 static int ssi_hash_map_result(struct device *dev,
129 struct ahash_req_ctx *state,
130 unsigned int digestsize)
132 state->digest_result_dma_addr =
133 dma_map_single(dev, (void *)state->digest_result_buff,
136 if (unlikely(dma_mapping_error(dev, state->digest_result_dma_addr))) {
137 dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
141 dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
142 digestsize, state->digest_result_buff,
143 &state->digest_result_dma_addr);
/*
 * Allocate and DMA-map all per-request hash state: the two data staging
 * buffers, the result bounce buffer, the intermediate digest, the
 * running byte count (except for XCBC-MAC, which keeps no length) and,
 * for HMAC, the opad digest.  Seeds the intermediate digest either from
 * the session ctx (HMAC/XCBC/CMAC) or from the larval digest in SRAM
 * via a BYPASS descriptor.  The error path below unwinds in reverse.
 */
148 static int ssi_hash_map_request(struct device *dev,
149 struct ahash_req_ctx *state,
150 struct ssi_hash_ctx *ctx)
152 bool is_hmac = ctx->is_hmac;
153 ssi_sram_addr_t larval_digest_addr = ssi_ahash_get_larval_digest_sram_addr(
154 ctx->drvdata, ctx->hash_mode);
155 struct ssi_crypto_req ssi_req = {};
156 struct cc_hw_desc desc;
/* GFP_DMA: these buffers are handed straight to the DMA engine */
159 state->buff0 = kzalloc(SSI_MAX_HASH_BLCK_SIZE, GFP_KERNEL | GFP_DMA);
163 state->buff1 = kzalloc(SSI_MAX_HASH_BLCK_SIZE, GFP_KERNEL | GFP_DMA);
167 state->digest_result_buff = kzalloc(SSI_MAX_HASH_DIGEST_SIZE, GFP_KERNEL | GFP_DMA);
168 if (!state->digest_result_buff)
171 state->digest_buff = kzalloc(ctx->inter_digestsize, GFP_KERNEL | GFP_DMA);
172 if (!state->digest_buff)
173 goto fail_digest_result_buff;
175 dev_dbg(dev, "Allocated digest-buffer in context ctx->digest_buff=@%p\n",
/* XCBC-MAC carries no running length; all other modes track one */
177 if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
178 state->digest_bytes_len = kzalloc(HASH_LEN_SIZE, GFP_KERNEL | GFP_DMA);
179 if (!state->digest_bytes_len)
/* NOTE(review): "state->>" typo in the debug string below — left as-is */
182 dev_dbg(dev, "Allocated digest-bytes-len in context state->>digest_bytes_len=@%p\n",
183 state->digest_bytes_len);
185 state->digest_bytes_len = NULL;
188 state->opad_digest_buff = kzalloc(ctx->inter_digestsize, GFP_KERNEL | GFP_DMA);
189 if (!state->opad_digest_buff)
/* NOTE(review): message names digest_bytes_len but prints the opad buffer */
192 dev_dbg(dev, "Allocated opad-digest-buffer in context state->digest_bytes_len=@%p\n",
193 state->opad_digest_buff);
195 state->digest_buff_dma_addr = dma_map_single(dev, (void *)state->digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
196 if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
197 dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n",
198 ctx->inter_digestsize, state->digest_buff);
201 dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
202 ctx->inter_digestsize, state->digest_buff,
203 &state->digest_buff_dma_addr);
/* HMAC/XCBC/CMAC: seed the request digest from the session context */
206 dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
207 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC || ctx->hw_mode == DRV_CIPHER_CMAC) {
208 memset(state->digest_buff, 0, ctx->inter_digestsize);
210 memcpy(state->digest_buff, ctx->digest_buff, ctx->inter_digestsize);
211 #if (DX_DEV_SHA_MAX > 256)
/* HMAC starts its byte count at one block (ipad already hashed) */
212 if (unlikely(ctx->hash_mode == DRV_HASH_SHA512 || ctx->hash_mode == DRV_HASH_SHA384))
213 memcpy(state->digest_bytes_len, digest_len_sha512_init, HASH_LEN_SIZE);
215 memcpy(state->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
217 memcpy(state->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
220 dma_sync_single_for_device(dev, state->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
222 if (ctx->hash_mode != DRV_HASH_NULL) {
223 dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
224 memcpy(state->opad_digest_buff, ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
227 /* Copy the initial digests if hash flow. The SRAM contains the
228 * initial digests in the expected order for all SHA*
231 set_din_sram(&desc, larval_digest_addr, ctx->inter_digestsize);
232 set_dout_dlli(&desc, state->digest_buff_dma_addr,
233 ctx->inter_digestsize, NS_BIT, 0);
234 set_flow_mode(&desc, BYPASS);
/* synchronous: wait for the SRAM->DDR copy before continuing */
236 rc = send_request(ctx->drvdata, &ssi_req, &desc, 1, 0);
238 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
243 if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
244 state->digest_bytes_len_dma_addr = dma_map_single(dev, (void *)state->digest_bytes_len, HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
245 if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
246 dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
247 HASH_LEN_SIZE, state->digest_bytes_len);
250 dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
251 HASH_LEN_SIZE, state->digest_bytes_len,
252 &state->digest_bytes_len_dma_addr);
254 state->digest_bytes_len_dma_addr = 0;
257 if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
258 state->opad_digest_dma_addr = dma_map_single(dev, (void *)state->opad_digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
259 if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
260 dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
261 ctx->inter_digestsize,
262 state->opad_digest_buff);
265 dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
266 ctx->inter_digestsize, state->opad_digest_buff,
267 &state->opad_digest_dma_addr);
269 state->opad_digest_dma_addr = 0;
/* fresh request: no staged data, no MLLI table yet */
271 state->buff0_cnt = 0;
272 state->buff1_cnt = 0;
273 state->buff_index = 0;
274 state->mlli_params.curr_pool = NULL;
/* --- error unwind: unmap then free in reverse allocation order --- */
279 if (state->digest_bytes_len_dma_addr) {
280 dma_unmap_single(dev, state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
281 state->digest_bytes_len_dma_addr = 0;
284 if (state->digest_buff_dma_addr) {
285 dma_unmap_single(dev, state->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
286 state->digest_buff_dma_addr = 0;
289 kfree(state->opad_digest_buff);
291 kfree(state->digest_bytes_len);
293 kfree(state->digest_buff);
294 fail_digest_result_buff:
295 kfree(state->digest_result_buff);
296 state->digest_result_buff = NULL;
307 static void ssi_hash_unmap_request(struct device *dev,
308 struct ahash_req_ctx *state,
309 struct ssi_hash_ctx *ctx)
311 if (state->digest_buff_dma_addr) {
312 dma_unmap_single(dev, state->digest_buff_dma_addr,
313 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
314 dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
315 &state->digest_buff_dma_addr);
316 state->digest_buff_dma_addr = 0;
318 if (state->digest_bytes_len_dma_addr) {
319 dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
320 HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
321 dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
322 &state->digest_bytes_len_dma_addr);
323 state->digest_bytes_len_dma_addr = 0;
325 if (state->opad_digest_dma_addr) {
326 dma_unmap_single(dev, state->opad_digest_dma_addr,
327 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
328 dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
329 &state->opad_digest_dma_addr);
330 state->opad_digest_dma_addr = 0;
333 kfree(state->opad_digest_buff);
334 kfree(state->digest_bytes_len);
335 kfree(state->digest_buff);
336 kfree(state->digest_result_buff);
341 static void ssi_hash_unmap_result(struct device *dev,
342 struct ahash_req_ctx *state,
343 unsigned int digestsize, u8 *result)
345 if (state->digest_result_dma_addr) {
346 dma_unmap_single(dev,
347 state->digest_result_dma_addr,
350 dev_dbg(dev, "unmpa digest result buffer va (%pK) pa (%pad) len %u\n",
351 state->digest_result_buff,
352 &state->digest_result_dma_addr, digestsize);
354 state->digest_result_buff,
357 state->digest_result_dma_addr = 0;
360 static void ssi_hash_update_complete(struct device *dev, void *ssi_req)
362 struct ahash_request *req = (struct ahash_request *)ssi_req;
363 struct ahash_req_ctx *state = ahash_request_ctx(req);
365 dev_dbg(dev, "req=%pK\n", req);
367 cc_unmap_hash_request(dev, state, req->src, false);
368 req->base.complete(&req->base, 0);
371 static void ssi_hash_digest_complete(struct device *dev, void *ssi_req)
373 struct ahash_request *req = (struct ahash_request *)ssi_req;
374 struct ahash_req_ctx *state = ahash_request_ctx(req);
375 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
376 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
377 u32 digestsize = crypto_ahash_digestsize(tfm);
379 dev_dbg(dev, "req=%pK\n", req);
381 cc_unmap_hash_request(dev, state, req->src, false);
382 ssi_hash_unmap_result(dev, state, digestsize, req->result);
383 ssi_hash_unmap_request(dev, state, ctx);
384 req->base.complete(&req->base, 0);
387 static void ssi_hash_complete(struct device *dev, void *ssi_req)
389 struct ahash_request *req = (struct ahash_request *)ssi_req;
390 struct ahash_req_ctx *state = ahash_request_ctx(req);
391 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
392 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
393 u32 digestsize = crypto_ahash_digestsize(tfm);
395 dev_dbg(dev, "req=%pK\n", req);
397 cc_unmap_hash_request(dev, state, req->src, false);
398 ssi_hash_unmap_result(dev, state, digestsize, req->result);
399 ssi_hash_unmap_request(dev, state, ctx);
400 req->base.complete(&req->base, 0);
/*
 * One-shot digest (init+update+final in a single descriptor sequence)
 * for plain hash or HMAC.  Maps the request state, the result buffer
 * and the source data, builds the HW sequence, and submits it either
 * asynchronously (completion via ssi_hash_digest_complete) or
 * synchronously, unmapping everything itself on the sync path and on
 * errors.
 */
403 static int ssi_hash_digest(struct ahash_req_ctx *state,
404 struct ssi_hash_ctx *ctx,
405 unsigned int digestsize,
406 struct scatterlist *src,
407 unsigned int nbytes, u8 *result,
410 struct device *dev = drvdata_to_dev(ctx->drvdata);
411 bool is_hmac = ctx->is_hmac;
412 struct ssi_crypto_req ssi_req = {};
413 struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
414 ssi_sram_addr_t larval_digest_addr = ssi_ahash_get_larval_digest_sram_addr(
415 ctx->drvdata, ctx->hash_mode);
419 dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
422 if (unlikely(ssi_hash_map_request(dev, state, ctx))) {
423 dev_err(dev, "map_ahash_source() failed\n");
427 if (unlikely(ssi_hash_map_result(dev, state, digestsize))) {
428 dev_err(dev, "map_ahash_digest() failed\n");
432 if (unlikely(cc_map_hash_request_final(ctx->drvdata, state,
434 dev_err(dev, "map_ahash_request_final() failed\n");
439 /* Setup DX request structure */
440 ssi_req.user_cb = (void *)ssi_hash_digest_complete;
441 ssi_req.user_arg = (void *)async_req;
444 /* If HMAC then load hash IPAD xor key, if HASH then load initial digest */
445 hw_desc_init(&desc[idx]);
446 set_cipher_mode(&desc[idx], ctx->hw_mode);
448 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
449 ctx->inter_digestsize, NS_BIT);
/* non-HMAC path: initial digest straight from SRAM larval table */
451 set_din_sram(&desc[idx], larval_digest_addr,
452 ctx->inter_digestsize);
454 set_flow_mode(&desc[idx], S_DIN_to_HASH);
455 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
458 /* Load the hash current length */
459 hw_desc_init(&desc[idx]);
460 set_cipher_mode(&desc[idx], ctx->hw_mode);
/* HMAC resumes from the stored count; plain hash starts at zero */
463 set_din_type(&desc[idx],  DMA_DLLI,
464 state->digest_bytes_len_dma_addr, HASH_LEN_SIZE,
467 set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
469 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
471 set_cipher_do(&desc[idx], DO_PAD);
473 set_flow_mode(&desc[idx], S_DIN_to_HASH);
474 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
/* stream the source data into the hash engine */
477 ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);
480 /* HW last hash block padding (aka. "DO_PAD") */
481 hw_desc_init(&desc[idx]);
482 set_cipher_mode(&desc[idx], ctx->hw_mode);
483 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
484 HASH_LEN_SIZE, NS_BIT, 0);
485 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
486 set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
487 set_cipher_do(&desc[idx], DO_PAD);
490 /* store the hash digest result in the context */
491 hw_desc_init(&desc[idx]);
492 set_cipher_mode(&desc[idx], ctx->hw_mode);
493 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
494 digestsize, NS_BIT, 0);
495 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
496 ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
497 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
/* --- HMAC second stage: hash(opad-xor-key || inner digest) --- */
500 /* Loading hash opad xor key state */
501 hw_desc_init(&desc[idx]);
502 set_cipher_mode(&desc[idx], ctx->hw_mode);
503 set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
504 ctx->inter_digestsize, NS_BIT);
505 set_flow_mode(&desc[idx], S_DIN_to_HASH);
506 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
509 /* Load the hash current length */
510 hw_desc_init(&desc[idx]);
511 set_cipher_mode(&desc[idx], ctx->hw_mode);
512 set_din_sram(&desc[idx],
513 ssi_ahash_get_initial_digest_len_sram_addr(
514 ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
515 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
516 set_flow_mode(&desc[idx], S_DIN_to_HASH);
517 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
520 /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
521 hw_desc_init(&desc[idx]);
522 set_din_no_dma(&desc[idx], 0, 0xfffff0);
523 set_dout_no_dma(&desc[idx], 0, 0, 1);
526 /* Perform HASH update */
527 hw_desc_init(&desc[idx]);
528 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
530 set_flow_mode(&desc[idx], DIN_HASH);
534 /* Get final MAC result */
535 hw_desc_init(&desc[idx]);
536 set_cipher_mode(&desc[idx], ctx->hw_mode);
/* async: raise completion IRQ on this last descriptor */
538 set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
539 NS_BIT, (async_req ? 1 : 0));
541 set_queue_last_ind(&desc[idx]);
542 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
543 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
544 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
545 ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
/* async submit: anything but -EINPROGRESS is a failure to unwind */
549 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
550 if (unlikely(rc != -EINPROGRESS)) {
551 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
552 cc_unmap_hash_request(dev, state, src, true);
553 ssi_hash_unmap_result(dev, state, digestsize, result);
554 ssi_hash_unmap_request(dev, state, ctx);
/* sync submit: complete here and unmap everything ourselves */
557 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
559 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
560 cc_unmap_hash_request(dev, state, src, true);
562 cc_unmap_hash_request(dev, state, src, false);
564 ssi_hash_unmap_result(dev, state, digestsize, result);
565 ssi_hash_unmap_request(dev, state, ctx);
/*
 * Hash update: feed more source data into the HW, restoring the saved
 * intermediate digest and byte count first and writing them back after.
 * Small updates that fit in the staging buffers are absorbed by
 * cc_map_hash_request_update() without touching the HW (rc==1 path).
 */
570 static int ssi_hash_update(struct ahash_req_ctx *state,
571 struct ssi_hash_ctx *ctx,
572 unsigned int block_size,
573 struct scatterlist *src,
577 struct device *dev = drvdata_to_dev(ctx->drvdata);
578 struct ssi_crypto_req ssi_req = {};
579 struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
583 dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
584 "hmac" : "hash", nbytes);
587 /* no real updates required */
591 rc = cc_map_hash_request_update(ctx->drvdata, state, src, nbytes,
/* data fit in the staging buffer — nothing for the HW to do yet */
595 dev_dbg(dev, " data size not require HW update %x\n",
597 /* No hardware updates are required */
600 dev_err(dev, "map_ahash_request_update() failed\n");
605 /* Setup DX request structure */
606 ssi_req.user_cb = (void *)ssi_hash_update_complete;
607 ssi_req.user_arg = async_req;
610 /* Restore hash digest */
611 hw_desc_init(&desc[idx]);
612 set_cipher_mode(&desc[idx], ctx->hw_mode);
613 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
614 ctx->inter_digestsize, NS_BIT);
615 set_flow_mode(&desc[idx], S_DIN_to_HASH);
616 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
618 /* Restore hash current length */
619 hw_desc_init(&desc[idx]);
620 set_cipher_mode(&desc[idx], ctx->hw_mode);
621 set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
622 HASH_LEN_SIZE, NS_BIT);
623 set_flow_mode(&desc[idx], S_DIN_to_HASH);
624 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
/* stream the new source data into the hash engine */
627 ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);
629 /* store the hash digest result in context */
630 hw_desc_init(&desc[idx]);
631 set_cipher_mode(&desc[idx], ctx->hw_mode);
632 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
633 ctx->inter_digestsize, NS_BIT, 0);
634 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
635 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
638 /* store current hash length in context */
639 hw_desc_init(&desc[idx]);
640 set_cipher_mode(&desc[idx], ctx->hw_mode);
/* async: completion IRQ raised on this last descriptor */
641 set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
642 HASH_LEN_SIZE, NS_BIT, (async_req ? 1 : 0));
644 set_queue_last_ind(&desc[idx]);
645 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
646 set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
/* async submit: only -EINPROGRESS is success */
650 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
651 if (unlikely(rc != -EINPROGRESS)) {
652 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
653 cc_unmap_hash_request(dev, state, src, true);
/* sync submit path */
656 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
658 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
659 cc_unmap_hash_request(dev, state, src, true);
661 cc_unmap_hash_request(dev, state, src, false);
/*
 * finup: hash any remaining buffered + new source data and produce the
 * final digest.  Relies on HW padding (HASH_PADDING_ENABLED) rather
 * than an explicit DO_PAD descriptor, unlike ssi_hash_final().
 * HMAC appends the opad second-stage sequence before reading the MAC.
 */
667 static int ssi_hash_finup(struct ahash_req_ctx *state,
668 struct ssi_hash_ctx *ctx,
669 unsigned int digestsize,
670 struct scatterlist *src,
675 struct device *dev = drvdata_to_dev(ctx->drvdata);
676 bool is_hmac = ctx->is_hmac;
677 struct ssi_crypto_req ssi_req = {};
678 struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
682 dev_dbg(dev, "===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash",
685 if (unlikely(cc_map_hash_request_final(ctx->drvdata, state, src,
687 dev_err(dev, "map_ahash_request_final() failed\n");
690 if (unlikely(ssi_hash_map_result(dev, state, digestsize))) {
691 dev_err(dev, "map_ahash_digest() failed\n");
696 /* Setup DX request structure */
697 ssi_req.user_cb = (void *)ssi_hash_complete;
698 ssi_req.user_arg = async_req;
701 /* Restore hash digest */
702 hw_desc_init(&desc[idx]);
703 set_cipher_mode(&desc[idx], ctx->hw_mode);
704 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
705 ctx->inter_digestsize, NS_BIT);
706 set_flow_mode(&desc[idx], S_DIN_to_HASH);
707 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
710 /* Restore hash current length */
711 hw_desc_init(&desc[idx]);
712 set_cipher_mode(&desc[idx], ctx->hw_mode);
/* padding enabled here: HW pads the last block itself on finup */
713 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
714 set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
715 HASH_LEN_SIZE, NS_BIT);
716 set_flow_mode(&desc[idx], S_DIN_to_HASH);
717 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
/* stream the remaining source data into the hash engine */
720 ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);
723 /* Store the hash digest result in the context */
724 hw_desc_init(&desc[idx]);
725 set_cipher_mode(&desc[idx], ctx->hw_mode);
726 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
727 digestsize, NS_BIT, 0);
728 ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
729 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
730 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
/* --- HMAC second stage: hash(opad-xor-key || inner digest) --- */
733 /* Loading hash OPAD xor key state */
734 hw_desc_init(&desc[idx]);
735 set_cipher_mode(&desc[idx], ctx->hw_mode);
736 set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
737 ctx->inter_digestsize, NS_BIT);
738 set_flow_mode(&desc[idx], S_DIN_to_HASH);
739 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
742 /* Load the hash current length */
743 hw_desc_init(&desc[idx]);
744 set_cipher_mode(&desc[idx], ctx->hw_mode);
745 set_din_sram(&desc[idx],
746 ssi_ahash_get_initial_digest_len_sram_addr(
747 ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
748 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
749 set_flow_mode(&desc[idx], S_DIN_to_HASH);
750 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
753 /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
754 hw_desc_init(&desc[idx]);
755 set_din_no_dma(&desc[idx], 0, 0xfffff0);
756 set_dout_no_dma(&desc[idx], 0, 0, 1);
759 /* Perform HASH update on last digest */
760 hw_desc_init(&desc[idx]);
761 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
763 set_flow_mode(&desc[idx], DIN_HASH);
767 /* Get final MAC result */
768 hw_desc_init(&desc[idx]);
/* async: completion IRQ raised on this last descriptor */
770 set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
771 NS_BIT, (async_req ? 1 : 0));
773 set_queue_last_ind(&desc[idx]);
774 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
775 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
776 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
777 ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
778 set_cipher_mode(&desc[idx], ctx->hw_mode);
/* async submit: only -EINPROGRESS is success */
782 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
783 if (unlikely(rc != -EINPROGRESS)) {
784 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
785 cc_unmap_hash_request(dev, state, src, true);
786 ssi_hash_unmap_result(dev, state, digestsize, result);
/* sync submit: unmap and copy the result out ourselves */
789 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
791 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
792 cc_unmap_hash_request(dev, state, src, true);
793 ssi_hash_unmap_result(dev, state, digestsize, result);
795 cc_unmap_hash_request(dev, state, src, false);
796 ssi_hash_unmap_result(dev, state, digestsize, result);
797 ssi_hash_unmap_request(dev, state, ctx);
/*
 * final: flush any buffered data and produce the final digest.  Unlike
 * finup, padding is disabled on the length restore and an explicit
 * DO_PAD descriptor writes the padded length back, then the digest is
 * read out.  HMAC appends the opad second-stage sequence as usual.
 */
803 static int ssi_hash_final(struct ahash_req_ctx *state,
804 struct ssi_hash_ctx *ctx,
805 unsigned int digestsize,
806 struct scatterlist *src,
811 struct device *dev = drvdata_to_dev(ctx->drvdata);
812 bool is_hmac = ctx->is_hmac;
813 struct ssi_crypto_req ssi_req = {};
814 struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
818 dev_dbg(dev, "===== %s-final (%d) ====\n", is_hmac ? "hmac" : "hash",
821 if (unlikely(cc_map_hash_request_final(ctx->drvdata, state, src,
823 dev_err(dev, "map_ahash_request_final() failed\n");
827 if (unlikely(ssi_hash_map_result(dev, state, digestsize))) {
828 dev_err(dev, "map_ahash_digest() failed\n");
833 /* Setup DX request structure */
834 ssi_req.user_cb = (void *)ssi_hash_complete;
835 ssi_req.user_arg = async_req;
838 /* Restore hash digest */
839 hw_desc_init(&desc[idx]);
840 set_cipher_mode(&desc[idx], ctx->hw_mode);
841 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
842 ctx->inter_digestsize, NS_BIT);
843 set_flow_mode(&desc[idx], S_DIN_to_HASH);
844 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
847 /* Restore hash current length */
848 hw_desc_init(&desc[idx]);
849 set_cipher_mode(&desc[idx], ctx->hw_mode);
/* padding disabled here — the explicit DO_PAD descriptor below pads */
850 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
851 set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
852 HASH_LEN_SIZE, NS_BIT);
853 set_flow_mode(&desc[idx], S_DIN_to_HASH);
854 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
/* stream the remaining buffered data into the hash engine */
857 ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);
859 /* "DO-PAD" must be enabled only when writing current length to HW */
860 hw_desc_init(&desc[idx]);
861 set_cipher_do(&desc[idx], DO_PAD);
862 set_cipher_mode(&desc[idx], ctx->hw_mode);
863 set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
864 HASH_LEN_SIZE, NS_BIT, 0);
865 set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
866 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
870 /* Store the hash digest result in the context */
871 hw_desc_init(&desc[idx]);
872 set_cipher_mode(&desc[idx], ctx->hw_mode);
873 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
874 digestsize, NS_BIT, 0);
875 ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
876 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
877 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
/* --- HMAC second stage: hash(opad-xor-key || inner digest) --- */
880 /* Loading hash OPAD xor key state */
881 hw_desc_init(&desc[idx]);
882 set_cipher_mode(&desc[idx], ctx->hw_mode);
883 set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
884 ctx->inter_digestsize, NS_BIT);
885 set_flow_mode(&desc[idx], S_DIN_to_HASH);
886 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
889 /* Load the hash current length */
890 hw_desc_init(&desc[idx]);
891 set_cipher_mode(&desc[idx], ctx->hw_mode);
892 set_din_sram(&desc[idx],
893 ssi_ahash_get_initial_digest_len_sram_addr(
894 ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
895 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
896 set_flow_mode(&desc[idx], S_DIN_to_HASH);
897 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
900 /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
901 hw_desc_init(&desc[idx]);
902 set_din_no_dma(&desc[idx], 0, 0xfffff0);
903 set_dout_no_dma(&desc[idx], 0, 0, 1);
906 /* Perform HASH update on last digest */
907 hw_desc_init(&desc[idx]);
908 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
910 set_flow_mode(&desc[idx], DIN_HASH);
914 /* Get final MAC result */
915 hw_desc_init(&desc[idx]);
/* async: completion IRQ raised on this last descriptor */
916 set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
917 NS_BIT, (async_req ? 1 : 0));
919 set_queue_last_ind(&desc[idx]);
920 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
921 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
922 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
923 ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
924 set_cipher_mode(&desc[idx], ctx->hw_mode);
/* async submit: only -EINPROGRESS is success */
928 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
929 if (unlikely(rc != -EINPROGRESS)) {
930 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
931 cc_unmap_hash_request(dev, state, src, true);
932 ssi_hash_unmap_result(dev, state, digestsize, result);
/* sync submit: unmap and copy the result out ourselves */
935 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
937 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
938 cc_unmap_hash_request(dev, state, src, true);
939 ssi_hash_unmap_result(dev, state, digestsize, result);
941 cc_unmap_hash_request(dev, state, src, false);
942 ssi_hash_unmap_result(dev, state, digestsize, result);
943 ssi_hash_unmap_request(dev, state, ctx);
949 static int ssi_hash_init(struct ahash_req_ctx *state, struct ssi_hash_ctx *ctx)
951 struct device *dev = drvdata_to_dev(ctx->drvdata);
953 state->xcbc_count = 0;
955 ssi_hash_map_request(dev, state, ctx);
/*
 * HMAC setkey: normalize the user key to one hash block — hash it down
 * if longer than the block size, zero-pad it if shorter — then run two
 * HW passes XOR-ing the normalized key with the ipad (0x36..) and opad
 * (0x5c..) constants, storing the resulting larval digests in the
 * session ctx (digest_buff for ipad, opad_tmp_keys for opad).
 * keylen == 0 selects the plain-hash flow.
 * (Function continues beyond this chunk.)
 */
960 static int ssi_hash_setkey(void *hash,
965 unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
966 struct ssi_crypto_req ssi_req = {};
967 struct ssi_hash_ctx *ctx = NULL;
970 int i, idx = 0, rc = 0;
971 struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
972 ssi_sram_addr_t larval_addr;
975 ctx = crypto_ahash_ctx(((struct crypto_ahash *)hash));
976 dev = drvdata_to_dev(ctx->drvdata);
977 dev_dbg(dev, "start keylen: %d", keylen);
979 blocksize = crypto_tfm_alg_blocksize(&((struct crypto_ahash *)hash)->base);
980 digestsize = crypto_ahash_digestsize(((struct crypto_ahash *)hash));
982 larval_addr = ssi_ahash_get_larval_digest_sram_addr(
983 ctx->drvdata, ctx->hash_mode);
985 /* The keylen value distinguishes HASH in case keylen is ZERO bytes,
986 * any NON-ZERO value utilizes HMAC flow
988 ctx->key_params.keylen = keylen;
989 ctx->key_params.key_dma_addr = 0;
/* map the caller's key for device reads during the setkey sequence */
993 ctx->key_params.key_dma_addr = dma_map_single(
995 keylen, DMA_TO_DEVICE);
996 if (unlikely(dma_mapping_error(dev,
997 ctx->key_params.key_dma_addr))) {
998 dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
1002 dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
1003 &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
/* Key longer than a block: replace it with hash(key), zero-padded */
1005 if (keylen > blocksize) {
1006 /* Load hash initial state */
1007 hw_desc_init(&desc[idx]);
1008 set_cipher_mode(&desc[idx], ctx->hw_mode);
1009 set_din_sram(&desc[idx], larval_addr,
1010 ctx->inter_digestsize);
1011 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1012 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1015 /* Load the hash current length*/
1016 hw_desc_init(&desc[idx]);
1017 set_cipher_mode(&desc[idx], ctx->hw_mode);
1018 set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
1019 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1020 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1021 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
/* hash the full key */
1024 hw_desc_init(&desc[idx]);
1025 set_din_type(&desc[idx], DMA_DLLI,
1026 ctx->key_params.key_dma_addr, keylen,
1028 set_flow_mode(&desc[idx], DIN_HASH);
1031 /* Get hashed key */
1032 hw_desc_init(&desc[idx]);
1033 set_cipher_mode(&desc[idx], ctx->hw_mode);
1034 set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
1035 digestsize, NS_BIT, 0);
1036 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1037 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1038 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
1039 ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
/* zero-pad the hashed key out to a full block */
1042 hw_desc_init(&desc[idx]);
1043 set_din_const(&desc[idx], 0, (blocksize - digestsize));
1044 set_flow_mode(&desc[idx], BYPASS);
1045 set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
1047 (blocksize - digestsize), NS_BIT, 0);
/* Key fits in a block: copy it verbatim ... */
1050 hw_desc_init(&desc[idx]);
1051 set_din_type(&desc[idx], DMA_DLLI,
1052 ctx->key_params.key_dma_addr, keylen,
1054 set_flow_mode(&desc[idx], BYPASS);
1055 set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
/* ... and zero-pad the remainder of the block, if any */
1059 if ((blocksize - keylen)) {
1060 hw_desc_init(&desc[idx]);
1061 set_din_const(&desc[idx], 0,
1062 (blocksize - keylen));
1063 set_flow_mode(&desc[idx], BYPASS);
1064 set_dout_dlli(&desc[idx],
1065 (ctx->opad_tmp_keys_dma_addr +
1066 keylen), (blocksize - keylen),
/* keylen == 0 (plain hash): use an all-zero key block */
1072 hw_desc_init(&desc[idx]);
1073 set_din_const(&desc[idx], 0, blocksize);
1074 set_flow_mode(&desc[idx], BYPASS);
1075 set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr),
1076 blocksize, NS_BIT, 0);
/* synchronous submit of the key-normalization sequence */
1080 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
1082 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1086 /* calc derived HMAC key */
/* i==0 -> ipad digest into digest_buff, i==1 -> opad into opad_tmp_keys */
1087 for (idx = 0, i = 0; i < 2; i++) {
1088 /* Load hash initial state */
1089 hw_desc_init(&desc[idx]);
1090 set_cipher_mode(&desc[idx], ctx->hw_mode);
1091 set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize);
1092 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1093 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1096 /* Load the hash current length*/
1097 hw_desc_init(&desc[idx]);
1098 set_cipher_mode(&desc[idx], ctx->hw_mode);
1099 set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
1100 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1101 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1104 /* Prepare ipad key */
1105 hw_desc_init(&desc[idx]);
1106 set_xor_val(&desc[idx], hmac_pad_const[i]);
1107 set_cipher_mode(&desc[idx], ctx->hw_mode);
1108 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1109 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1112 /* Perform HASH update */
1113 hw_desc_init(&desc[idx]);
1114 set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
1116 set_cipher_mode(&desc[idx], ctx->hw_mode);
1117 set_xor_active(&desc[idx]);
1118 set_flow_mode(&desc[idx], DIN_HASH);
1121 /* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest of the first HASH "update" state) */
1122 hw_desc_init(&desc[idx]);
1123 set_cipher_mode(&desc[idx], ctx->hw_mode);
1124 if (i > 0) /* Not first iteration */
1125 set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
1126 ctx->inter_digestsize, NS_BIT, 0);
1127 else /* First iteration */
1128 set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr,
1129 ctx->inter_digestsize, NS_BIT, 0);
1130 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1131 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
/* synchronous submit of the ipad/opad derivation sequence */
1135 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
/* on error: flag the bad key length back to the crypto API */
1139 crypto_ahash_set_flags((struct crypto_ahash *)hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
1141 if (ctx->key_params.key_dma_addr) {
1142 dma_unmap_single(dev, ctx->key_params.key_dma_addr,
1143 ctx->key_params.keylen, DMA_TO_DEVICE);
1144 dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
1145 &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
/*
 * ssi_xcbc_setkey() - .setkey handler for the xcbc(aes) ahash transform.
 *
 * Accepts only standard AES key sizes (128/192/256 bit), DMA-maps the
 * caller's key, then queues a synchronous descriptor sequence that loads
 * the key into the AES engine in ECB-encrypt mode and derives the three
 * XCBC subkeys K1/K2/K3 = AES-ECB(key, 0x01..01 / 0x02..02 / 0x03..03)
 * directly into the context's opad_tmp_keys buffer at the K1/K2/K3
 * offsets (per RFC 3566 key derivation).
 */
1150 static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
1151 const u8 *key, unsigned int keylen)
1153 struct ssi_crypto_req ssi_req = {};
1154 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1155 struct device *dev = drvdata_to_dev(ctx->drvdata);
1156 int idx = 0, rc = 0;
1157 struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
1159 dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
/* Only the three standard AES key sizes are accepted. */
1162 case AES_KEYSIZE_128:
1163 case AES_KEYSIZE_192:
1164 case AES_KEYSIZE_256:
1170 ctx->key_params.keylen = keylen;
/* Map the caller's key so the HW engine can DMA it in. */
1172 ctx->key_params.key_dma_addr = dma_map_single(
1174 keylen, DMA_TO_DEVICE);
1175 if (unlikely(dma_mapping_error(dev, ctx->key_params.key_dma_addr))) {
1176 dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
1180 dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
1181 &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
1183 ctx->is_hmac = true;
1184 /* 1. Load the AES key */
1185 hw_desc_init(&desc[idx]);
1186 set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr,
1188 set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1189 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1190 set_key_size_aes(&desc[idx], keylen);
1191 set_flow_mode(&desc[idx], S_DIN_to_AES);
1192 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
/* 2. Derive K1 = AES-ECB(key, 0x01..01) into opad_tmp_keys + K1 offset. */
1195 hw_desc_init(&desc[idx]);
1196 set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
1197 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1198 set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
1199 XCBC_MAC_K1_OFFSET),
1200 CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
/* 3. Derive K2 = AES-ECB(key, 0x02..02). */
1203 hw_desc_init(&desc[idx]);
1204 set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
1205 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1206 set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
1207 XCBC_MAC_K2_OFFSET),
1208 CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
/* 4. Derive K3 = AES-ECB(key, 0x03..03). */
1211 hw_desc_init(&desc[idx]);
1212 set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
1213 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1214 set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
1215 XCBC_MAC_K3_OFFSET),
1216 CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
/* Run the sequence synchronously (is_dout == 0: wait for completion). */
1219 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
/* On failure, report a bad key length to the crypto API caller. */
1222 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
/* The key buffer is only needed during derivation; always unmap it. */
1224 dma_unmap_single(dev, ctx->key_params.key_dma_addr,
1225 ctx->key_params.keylen, DMA_TO_DEVICE);
1226 dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
1227 &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
/*
 * ssi_cmac_setkey() - .setkey handler for the cmac(aes) ahash transform.
 *
 * Validates the AES key size and copies the key into the pre-mapped
 * opad_tmp_keys buffer (bracketed by dma_sync_single_for_cpu/for_device
 * since that buffer stays DMA-mapped for the context's lifetime). Unlike
 * XCBC, no subkey derivation is queued here; the raw key is consumed by
 * the CMAC descriptor setup at request time.
 */
1233 static int ssi_cmac_setkey(struct crypto_ahash *ahash,
1234 const u8 *key, unsigned int keylen)
1236 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1237 struct device *dev = drvdata_to_dev(ctx->drvdata);
1239 dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
1241 ctx->is_hmac = true;
/* Only standard AES key sizes are accepted. */
1244 case AES_KEYSIZE_128:
1245 case AES_KEYSIZE_192:
1246 case AES_KEYSIZE_256:
1252 ctx->key_params.keylen = keylen;
1254 /* STAT_PHASE_1: Copy key to ctx */
/* Give the CPU ownership of the persistently-mapped key buffer. */
1256 dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr,
1257 keylen, DMA_TO_DEVICE);
1259 memcpy(ctx->opad_tmp_keys_buff, key, keylen);
/*
 * NOTE(review): zero-pads bytes 24..CC_AES_KEY_SIZE_MAX — presumably
 * only meant for 192-bit (24-byte) keys; the guarding length check is
 * outside this excerpt, confirm before relying on it.
 */
1261 memset(ctx->opad_tmp_keys_buff + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
/* Hand the buffer back to the device. */
1263 dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
1264 keylen, DMA_TO_DEVICE);
1266 ctx->key_params.keylen = keylen;
/*
 * ssi_hash_free_ctx() - release the per-tfm DMA resources.
 *
 * Unmaps the digest buffer and the opad/tmp-keys buffer if they were
 * mapped, zeroes the stored DMA addresses so a second call is a no-op,
 * and clears the recorded key length.
 */
1272 static void ssi_hash_free_ctx(struct ssi_hash_ctx *ctx)
1274 struct device *dev = drvdata_to_dev(ctx->drvdata);
1276 if (ctx->digest_buff_dma_addr) {
1277 dma_unmap_single(dev, ctx->digest_buff_dma_addr,
1278 sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
1279 dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
1280 &ctx->digest_buff_dma_addr);
/* Mark as unmapped to make this function idempotent. */
1281 ctx->digest_buff_dma_addr = 0;
1283 if (ctx->opad_tmp_keys_dma_addr) {
1284 dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
1285 sizeof(ctx->opad_tmp_keys_buff),
1287 dev_dbg(dev, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
1288 &ctx->opad_tmp_keys_dma_addr);
1289 ctx->opad_tmp_keys_dma_addr = 0;
1292 ctx->key_params.keylen = 0;
/*
 * ssi_hash_alloc_ctx() - set up the per-tfm DMA mappings.
 *
 * Maps the context-embedded digest buffer and opad/tmp-keys buffer
 * bidirectionally for the lifetime of the transform. On any mapping
 * failure the partially-initialized state is torn down via
 * ssi_hash_free_ctx(). Returns 0 on success.
 */
1295 static int ssi_hash_alloc_ctx(struct ssi_hash_ctx *ctx)
1297 struct device *dev = drvdata_to_dev(ctx->drvdata);
1299 ctx->key_params.keylen = 0;
1301 ctx->digest_buff_dma_addr = dma_map_single(dev, (void *)ctx->digest_buff, sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
1302 if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
1303 dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
1304 sizeof(ctx->digest_buff), ctx->digest_buff);
1307 dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
1308 sizeof(ctx->digest_buff), ctx->digest_buff,
1309 &ctx->digest_buff_dma_addr);
1311 ctx->opad_tmp_keys_dma_addr = dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff, sizeof(ctx->opad_tmp_keys_buff), DMA_BIDIRECTIONAL);
1312 if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
1313 dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
1314 sizeof(ctx->opad_tmp_keys_buff),
1315 ctx->opad_tmp_keys_buff);
1318 dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
1319 sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
1320 &ctx->opad_tmp_keys_dma_addr);
1322 ctx->is_hmac = false;
/* Error path: undo whatever was mapped so far. */
1326 ssi_hash_free_ctx(ctx);
/*
 * ssi_ahash_cra_init() - crypto_tfm .cra_init for all ahash algorithms.
 *
 * Walks from the generic crypto_alg back to the enclosing ssi_hash_alg
 * (via hash_alg_common -> ahash_alg -> ssi_hash_alg container_of chain),
 * copies the per-algorithm HW parameters into the tfm context, sets the
 * per-request context size, and maps the context DMA buffers.
 */
1330 static int ssi_ahash_cra_init(struct crypto_tfm *tfm)
1332 struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1333 struct hash_alg_common *hash_alg_common =
1334 container_of(tfm->__crt_alg, struct hash_alg_common, base);
1335 struct ahash_alg *ahash_alg =
1336 container_of(hash_alg_common, struct ahash_alg, halg);
1337 struct ssi_hash_alg *ssi_alg =
1338 container_of(ahash_alg, struct ssi_hash_alg, ahash_alg);
1340 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1341 sizeof(struct ahash_req_ctx));
/* Per-algorithm parameters captured at registration time. */
1343 ctx->hash_mode = ssi_alg->hash_mode;
1344 ctx->hw_mode = ssi_alg->hw_mode;
1345 ctx->inter_digestsize = ssi_alg->inter_digestsize;
1346 ctx->drvdata = ssi_alg->drvdata;
1348 return ssi_hash_alloc_ctx(ctx);
/*
 * ssi_hash_cra_exit() - crypto_tfm .cra_exit; releases the DMA mappings
 * created by ssi_ahash_cra_init().
 */
1351 static void ssi_hash_cra_exit(struct crypto_tfm *tfm)
1353 struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1354 struct device *dev = drvdata_to_dev(ctx->drvdata);
1356 dev_dbg(dev, "ssi_hash_cra_exit");
1357 ssi_hash_free_ctx(ctx);
/*
 * ssi_mac_update() - .update handler shared by the xcbc(aes) and
 * cmac(aes) transforms.
 *
 * Zero-byte updates return immediately. Otherwise the input is mapped
 * for the HW; if the buffer manager decides no HW pass is needed yet
 * (data buffered below block size), the request completes early.
 * A full pass queues: MAC setup (XCBC or CMAC, by ctx->hw_mode), the
 * data descriptor, and a write-back of the intermediate digest into the
 * request state. Submitted asynchronously; -EINPROGRESS is the expected
 * return, with ssi_hash_update_complete called on completion.
 */
1360 static int ssi_mac_update(struct ahash_request *req)
1362 struct ahash_req_ctx *state = ahash_request_ctx(req);
1363 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1364 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1365 struct device *dev = drvdata_to_dev(ctx->drvdata);
1366 unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
1367 struct ssi_crypto_req ssi_req = {};
1368 struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
1372 if (req->nbytes == 0) {
1373 /* no real updates required */
/* Counts HW update passes; ssi_mac_final/finup key off this. */
1377 state->xcbc_count++;
1379 rc = cc_map_hash_request_update(ctx->drvdata, state, req->src,
1380 req->nbytes, block_size);
1383 dev_dbg(dev, " data size not require HW update %x\n",
1385 /* No hardware updates are required */
1388 dev_err(dev, "map_ahash_request_update() failed\n");
/* Select the MAC engine setup matching this transform. */
1392 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
1393 ssi_hash_create_xcbc_setup(req, desc, &idx);
1395 ssi_hash_create_cmac_setup(req, desc, &idx);
1397 ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);
1399 /* store the hash digest result in context */
1400 hw_desc_init(&desc[idx]);
1401 set_cipher_mode(&desc[idx], ctx->hw_mode);
1402 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
1403 ctx->inter_digestsize, NS_BIT, 1);
1404 set_queue_last_ind(&desc[idx]);
1405 set_flow_mode(&desc[idx], S_AES_to_DOUT);
1406 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1409 /* Setup DX request structure */
1410 ssi_req.user_cb = (void *)ssi_hash_update_complete;
1411 ssi_req.user_arg = (void *)req;
/* Asynchronous submit (is_dout == 1); completion runs the callback. */
1413 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
1414 if (unlikely(rc != -EINPROGRESS)) {
1415 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1416 cc_unmap_hash_request(dev, state, req->src, true);
/*
 * ssi_mac_final() - .final handler for the xcbc(aes)/cmac(aes) ahash.
 *
 * Maps any remaining buffered data and the result buffer, then builds
 * the last-block descriptor sequence. If previous updates ran and no
 * remainder is buffered (xcbc_count && rem_cnt == 0), the intermediate
 * digest is first ECB-decrypted with K1 — presumably to undo the last
 * AES pass so the final block can be re-processed with proper last-block
 * treatment (TODO confirm against the XCBC/CMAC last-block algorithm).
 * A totally empty message (xcbc_count == 0) uses the HW "cmac size 0"
 * mode instead of a data descriptor. Submitted asynchronously;
 * -EINPROGRESS is the expected return.
 */
1421 static int ssi_mac_final(struct ahash_request *req)
1423 struct ahash_req_ctx *state = ahash_request_ctx(req);
1424 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1425 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1426 struct device *dev = drvdata_to_dev(ctx->drvdata);
1427 struct ssi_crypto_req ssi_req = {};
1428 struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
1431 u32 key_size, key_len;
1432 u32 digestsize = crypto_ahash_digestsize(tfm);
/* Bytes still buffered (not yet fed to HW) in the active buffer. */
1434 u32 rem_cnt = state->buff_index ? state->buff1_cnt :
1437 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
/* XCBC always works with the derived 128-bit subkeys. */
1438 key_size = CC_AES_128_BIT_KEY_SIZE;
1439 key_len = CC_AES_128_BIT_KEY_SIZE;
/*
 * NOTE(review): a 24-byte (AES-192) key is loaded with the HW max
 * key size — presumably because the key buffer was zero-padded in
 * setkey; confirm.
 */
1441 key_size = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
1442 ctx->key_params.keylen;
1443 key_len = ctx->key_params.keylen;
1446 dev_dbg(dev, "===== final xcbc reminder (%d) ====\n", rem_cnt);
1448 if (unlikely(cc_map_hash_request_final(ctx->drvdata, state, req->src,
1450 dev_err(dev, "map_ahash_request_final() failed\n");
1454 if (unlikely(ssi_hash_map_result(dev, state, digestsize))) {
1455 dev_err(dev, "map_ahash_digest() failed\n");
1459 /* Setup DX request structure */
1460 ssi_req.user_cb = (void *)ssi_hash_complete;
1461 ssi_req.user_arg = (void *)req;
1463 if (state->xcbc_count && rem_cnt == 0) {
1464 /* Load key for ECB decryption */
1465 hw_desc_init(&desc[idx]);
1466 set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1467 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
1468 set_din_type(&desc[idx], DMA_DLLI,
1469 (ctx->opad_tmp_keys_dma_addr +
1470 XCBC_MAC_K1_OFFSET), key_size, NS_BIT);
1471 set_key_size_aes(&desc[idx], key_len);
1472 set_flow_mode(&desc[idx], S_DIN_to_AES);
1473 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1476 /* Initiate decryption of block state to previous block_state-XOR-M[n] */
1477 hw_desc_init(&desc[idx]);
1478 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
1479 CC_AES_BLOCK_SIZE, NS_BIT);
1480 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
1481 CC_AES_BLOCK_SIZE, NS_BIT, 0);
1482 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1485 /* Memory Barrier: wait for axi write to complete */
1486 hw_desc_init(&desc[idx]);
1487 set_din_no_dma(&desc[idx], 0, 0xfffff0);
1488 set_dout_no_dma(&desc[idx], 0, 0, 1);
/* Re-load the MAC state for the final pass. */
1492 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
1493 ssi_hash_create_xcbc_setup(req, desc, &idx);
1495 ssi_hash_create_cmac_setup(req, desc, &idx);
1497 if (state->xcbc_count == 0) {
/* Empty message: let the HW produce the MAC of zero-length input. */
1498 hw_desc_init(&desc[idx]);
1499 set_cipher_mode(&desc[idx], ctx->hw_mode);
1500 set_key_size_aes(&desc[idx], key_len);
1501 set_cmac_size0_mode(&desc[idx]);
1502 set_flow_mode(&desc[idx], S_DIN_to_AES);
1504 } else if (rem_cnt > 0) {
/* Feed the buffered remainder as the last block. */
1505 ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
/* No remainder: push a zero block through the (decrypted) state. */
1507 hw_desc_init(&desc[idx]);
1508 set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
1509 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1513 /* Get final MAC result */
1514 hw_desc_init(&desc[idx]);
1516 set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1517 digestsize, NS_BIT, 1);
1518 set_queue_last_ind(&desc[idx]);
1519 set_flow_mode(&desc[idx], S_AES_to_DOUT);
1520 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1521 set_cipher_mode(&desc[idx], ctx->hw_mode);
/* Asynchronous submit; unwind the mappings if the queue rejects it. */
1524 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
1525 if (unlikely(rc != -EINPROGRESS)) {
1526 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1527 cc_unmap_hash_request(dev, state, req->src, true);
1528 ssi_hash_unmap_result(dev, state, digestsize, req->result);
/*
 * ssi_mac_finup() - .finup handler for the xcbc(aes)/cmac(aes) ahash.
 *
 * If updates already ran and this call brings no new data, the work is
 * delegated to ssi_mac_final(). Otherwise the remaining input and the
 * result buffer are mapped, the MAC setup (XCBC or CMAC) is queued,
 * the data (or the HW zero-size mode for an empty message) is processed,
 * and the final MAC is written to the request result. Asynchronous;
 * -EINPROGRESS is the expected return.
 */
1533 static int ssi_mac_finup(struct ahash_request *req)
1535 struct ahash_req_ctx *state = ahash_request_ctx(req);
1536 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1537 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1538 struct device *dev = drvdata_to_dev(ctx->drvdata);
1539 struct ssi_crypto_req ssi_req = {};
1540 struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
1544 u32 digestsize = crypto_ahash_digestsize(tfm);
1546 dev_dbg(dev, "===== finup xcbc(%d) ====\n", req->nbytes);
1547 if (state->xcbc_count > 0 && req->nbytes == 0) {
1548 dev_dbg(dev, "No data to update. Call to fdx_mac_final\n");
1549 return ssi_mac_final(req);
1552 if (unlikely(cc_map_hash_request_final(ctx->drvdata, state, req->src,
1554 dev_err(dev, "map_ahash_request_final() failed\n");
1557 if (unlikely(ssi_hash_map_result(dev, state, digestsize))) {
1558 dev_err(dev, "map_ahash_digest() failed\n");
1562 /* Setup DX request structure */
1563 ssi_req.user_cb = (void *)ssi_hash_complete;
1564 ssi_req.user_arg = (void *)req;
/* Engine setup differs per MAC flavor; XCBC uses fixed 128-bit subkeys. */
1566 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1567 key_len = CC_AES_128_BIT_KEY_SIZE;
1568 ssi_hash_create_xcbc_setup(req, desc, &idx);
1570 key_len = ctx->key_params.keylen;
1571 ssi_hash_create_cmac_setup(req, desc, &idx);
1574 if (req->nbytes == 0) {
/* Empty message: HW zero-size CMAC mode instead of a data pass. */
1575 hw_desc_init(&desc[idx]);
1576 set_cipher_mode(&desc[idx], ctx->hw_mode);
1577 set_key_size_aes(&desc[idx], key_len);
1578 set_cmac_size0_mode(&desc[idx]);
1579 set_flow_mode(&desc[idx], S_DIN_to_AES);
1582 ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1585 /* Get final MAC result */
1586 hw_desc_init(&desc[idx]);
1588 set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1589 digestsize, NS_BIT, 1);
1590 set_queue_last_ind(&desc[idx]);
1591 set_flow_mode(&desc[idx], S_AES_to_DOUT);
1592 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1593 set_cipher_mode(&desc[idx], ctx->hw_mode);
/* Asynchronous submit; unwind mappings on queue failure. */
1596 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
1597 if (unlikely(rc != -EINPROGRESS)) {
1598 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1599 cc_unmap_hash_request(dev, state, req->src, true);
1600 ssi_hash_unmap_result(dev, state, digestsize, req->result);
/*
 * ssi_mac_digest() - .digest handler (init + finup in one shot) for the
 * xcbc(aes)/cmac(aes) ahash transforms.
 *
 * Maps the request state, result buffer, and the whole input; queues
 * the MAC setup, the data descriptor (or HW zero-size mode for empty
 * input), and the final MAC write-out. Asynchronous; -EINPROGRESS is
 * the expected return, with ssi_hash_digest_complete on completion.
 */
1605 static int ssi_mac_digest(struct ahash_request *req)
1607 struct ahash_req_ctx *state = ahash_request_ctx(req);
1608 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1609 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1610 struct device *dev = drvdata_to_dev(ctx->drvdata);
1611 u32 digestsize = crypto_ahash_digestsize(tfm);
1612 struct ssi_crypto_req ssi_req = {};
1613 struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
1618 dev_dbg(dev, "===== -digest mac (%d) ====\n", req->nbytes);
1620 if (unlikely(ssi_hash_map_request(dev, state, ctx))) {
1621 dev_err(dev, "map_ahash_source() failed\n");
1624 if (unlikely(ssi_hash_map_result(dev, state, digestsize))) {
1625 dev_err(dev, "map_ahash_digest() failed\n");
1629 if (unlikely(cc_map_hash_request_final(ctx->drvdata, state, req->src,
1631 dev_err(dev, "map_ahash_request_final() failed\n");
1635 /* Setup DX request structure */
1636 ssi_req.user_cb = (void *)ssi_hash_digest_complete;
1637 ssi_req.user_arg = (void *)req;
/* Engine setup differs per MAC flavor; XCBC uses fixed 128-bit subkeys. */
1639 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1640 key_len = CC_AES_128_BIT_KEY_SIZE;
1641 ssi_hash_create_xcbc_setup(req, desc, &idx);
1643 key_len = ctx->key_params.keylen;
1644 ssi_hash_create_cmac_setup(req, desc, &idx);
1647 if (req->nbytes == 0) {
/* Empty message: HW zero-size CMAC mode instead of a data pass. */
1648 hw_desc_init(&desc[idx]);
1649 set_cipher_mode(&desc[idx], ctx->hw_mode);
1650 set_key_size_aes(&desc[idx], key_len);
1651 set_cmac_size0_mode(&desc[idx]);
1652 set_flow_mode(&desc[idx], S_DIN_to_AES);
1655 ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1658 /* Get final MAC result */
1659 hw_desc_init(&desc[idx]);
1660 set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1661 CC_AES_BLOCK_SIZE, NS_BIT, 1);
1662 set_queue_last_ind(&desc[idx]);
1663 set_flow_mode(&desc[idx], S_AES_to_DOUT);
1664 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1665 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1666 set_cipher_mode(&desc[idx], ctx->hw_mode);
/* Asynchronous submit; unwind all three mappings on queue failure. */
1669 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
1670 if (unlikely(rc != -EINPROGRESS)) {
1671 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1672 cc_unmap_hash_request(dev, state, req->src, true);
1673 ssi_hash_unmap_result(dev, state, digestsize, req->result);
1674 ssi_hash_unmap_request(dev, state, ctx);
1679 //ahash wrap functions
/* .digest wrapper: extracts state/ctx/digestsize and delegates to the
 * common ssi_hash_digest() implementation.
 */
1680 static int ssi_ahash_digest(struct ahash_request *req)
1682 struct ahash_req_ctx *state = ahash_request_ctx(req);
1683 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1684 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1685 u32 digestsize = crypto_ahash_digestsize(tfm);
1687 return ssi_hash_digest(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
/* .update wrapper: delegates to the common ssi_hash_update() with the
 * algorithm block size.
 */
1690 static int ssi_ahash_update(struct ahash_request *req)
1692 struct ahash_req_ctx *state = ahash_request_ctx(req);
1693 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1694 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1695 unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
1697 return ssi_hash_update(state, ctx, block_size, req->src, req->nbytes, (void *)req);
/* .finup wrapper: delegates to the common ssi_hash_finup(). */
1700 static int ssi_ahash_finup(struct ahash_request *req)
1702 struct ahash_req_ctx *state = ahash_request_ctx(req);
1703 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1704 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1705 u32 digestsize = crypto_ahash_digestsize(tfm);
1707 return ssi_hash_finup(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
/* .final wrapper: delegates to the common ssi_hash_final(). */
1710 static int ssi_ahash_final(struct ahash_request *req)
1712 struct ahash_req_ctx *state = ahash_request_ctx(req);
1713 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1714 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1715 u32 digestsize = crypto_ahash_digestsize(tfm);
1717 return ssi_hash_final(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
/* .init wrapper: delegates to the common ssi_hash_init(). */
1720 static int ssi_ahash_init(struct ahash_request *req)
1722 struct ahash_req_ctx *state = ahash_request_ctx(req);
1723 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1724 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1725 struct device *dev = drvdata_to_dev(ctx->drvdata);
1727 dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);
1729 return ssi_hash_init(state, ctx);
/*
 * ssi_ahash_export() - serialize the in-flight hash state into 'out'.
 *
 * Layout written: CC_EXPORT_MAGIC (u32), the intermediate digest
 * (inter_digestsize bytes, synced from its DMA mapping), the running
 * byte-length counter (HASH_LEN_SIZE bytes, or 0x5F poison when the
 * transform has no length state), the pending-buffer byte count (u32),
 * and finally the pending buffer itself. Must stay in lock-step with
 * ssi_ahash_import().
 */
1732 static int ssi_ahash_export(struct ahash_request *req, void *out)
1734 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1735 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1736 struct device *dev = drvdata_to_dev(ctx->drvdata);
1737 struct ahash_req_ctx *state = ahash_request_ctx(req);
/* buff_index selects which of the two staging buffers is active. */
1738 u8 *curr_buff = state->buff_index ? state->buff1 : state->buff0;
1739 u32 curr_buff_cnt = state->buff_index ? state->buff1_cnt :
1741 const u32 tmp = CC_EXPORT_MAGIC;
1743 memcpy(out, &tmp, sizeof(u32));
/* CPU must own the DMA-mapped digest before reading it. */
1746 dma_sync_single_for_cpu(dev, state->digest_buff_dma_addr,
1747 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
1748 memcpy(out, state->digest_buff, ctx->inter_digestsize);
1749 out += ctx->inter_digestsize;
1751 if (state->digest_bytes_len_dma_addr) {
1752 dma_sync_single_for_cpu(dev, state->digest_bytes_len_dma_addr,
1753 HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
1754 memcpy(out, state->digest_bytes_len, HASH_LEN_SIZE);
1756 /* Poison the unused exported digest len field. */
1757 memset(out, 0x5F, HASH_LEN_SIZE);
1759 out += HASH_LEN_SIZE;
1761 memcpy(out, &curr_buff_cnt, sizeof(u32));
1764 memcpy(out, curr_buff, curr_buff_cnt);
1766 /* No sync for device ineeded since we did not change the data,
/*
 * ssi_ahash_import() - restore hash state previously written by
 * ssi_ahash_export().
 *
 * Verifies the leading CC_EXPORT_MAGIC, re-initializes the request
 * state, copies the intermediate digest and (if present) the running
 * byte-length counter back into their DMA-mapped buffers, hands those
 * buffers back to the device, then restores the pending staging buffer
 * after sanity-checking its count against SSI_MAX_HASH_BLCK_SIZE.
 */
1773 static int ssi_ahash_import(struct ahash_request *req, const void *in)
1775 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1776 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1777 struct device *dev = drvdata_to_dev(ctx->drvdata);
1778 struct ahash_req_ctx *state = ahash_request_ctx(req);
/* Reject blobs not produced by ssi_ahash_export(). */
1782 memcpy(&tmp, in, sizeof(u32));
1783 if (tmp != CC_EXPORT_MAGIC) {
1789 rc = ssi_hash_init(state, ctx);
/* CPU ownership of the mapped buffers while we write them. */
1793 dma_sync_single_for_cpu(dev, state->digest_buff_dma_addr,
1794 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
1795 memcpy(state->digest_buff, in, ctx->inter_digestsize);
1796 in += ctx->inter_digestsize;
1798 if (state->digest_bytes_len_dma_addr) {
1799 dma_sync_single_for_cpu(dev, state->digest_bytes_len_dma_addr,
1800 HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
1801 memcpy(state->digest_bytes_len, in, HASH_LEN_SIZE);
/* Skip the length field even when this transform has none (poisoned). */
1803 in += HASH_LEN_SIZE;
/* Hand the restored buffers back to the device. */
1805 dma_sync_single_for_device(dev, state->digest_buff_dma_addr,
1806 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
1808 if (state->digest_bytes_len_dma_addr)
1809 dma_sync_single_for_device(dev,
1810 state->digest_bytes_len_dma_addr,
1811 HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
1813 state->buff_index = 0;
1815 /* Sanity check the data as much as possible */
1816 memcpy(&tmp, in, sizeof(u32));
1817 if (tmp > SSI_MAX_HASH_BLCK_SIZE) {
1823 state->buff0_cnt = tmp;
1824 memcpy(state->buff0, in, state->buff0_cnt);
/* .setkey wrapper for HMAC transforms: delegates to the common
 * ssi_hash_setkey() (synchronous == false).
 */
1830 static int ssi_ahash_setkey(struct crypto_ahash *ahash,
1831 const u8 *key, unsigned int keylen)
1833 return ssi_hash_setkey((void *)ahash, key, keylen, false);
/*
 * struct ssi_hash_template - static per-algorithm registration data,
 * one entry per supported hash/MAC in driver_hash[] below. Carries
 * both the plain-hash and the keyed (hmac/xcbc/cmac) algorithm names,
 * the ahash template, and the HW mode parameters copied into each tfm
 * context by ssi_ahash_cra_init().
 */
1836 struct ssi_hash_template {
1837 char name[CRYPTO_MAX_ALG_NAME];
1838 char driver_name[CRYPTO_MAX_ALG_NAME];
1839 char mac_name[CRYPTO_MAX_ALG_NAME];
1840 char mac_driver_name[CRYPTO_MAX_ALG_NAME];
1841 unsigned int blocksize;
1843 struct ahash_alg template_ahash;
/* Size of the HW intermediate digest (may exceed the final digest). */
1846 int inter_digestsize;
1847 struct ssi_drvdata *drvdata;
/*
 * Exported-state size for a given digest size: digest + running length
 * + one block of pending data + two u32 fields (magic and buffer count)
 * — must match the layout used by ssi_ahash_export()/import().
 */
1850 #define CC_STATE_SIZE(_x) \
1851 ((_x) + HASH_LEN_SIZE + SSI_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
1853 /* hash descriptors */
/*
 * Registration table: one entry per algorithm pair (hash + its keyed
 * variant). Entries for SHA-384/512 are compiled in only when the HW
 * supports digests wider than 256 bits (DX_DEV_SHA_MAX). The xcbc/cmac
 * entries have no plain-hash name and use the MAC-specific handlers.
 */
1854 static struct ssi_hash_template driver_hash[] = {
1855 //Asynchronize hash template
1858 .driver_name = "sha1-dx",
1859 .mac_name = "hmac(sha1)",
1860 .mac_driver_name = "hmac-sha1-dx",
1861 .blocksize = SHA1_BLOCK_SIZE,
1862 .synchronize = false,
1864 .init = ssi_ahash_init,
1865 .update = ssi_ahash_update,
1866 .final = ssi_ahash_final,
1867 .finup = ssi_ahash_finup,
1868 .digest = ssi_ahash_digest,
1869 .export = ssi_ahash_export,
1870 .import = ssi_ahash_import,
1871 .setkey = ssi_ahash_setkey,
1873 .digestsize = SHA1_DIGEST_SIZE,
1874 .statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
1877 .hash_mode = DRV_HASH_SHA1,
1878 .hw_mode = DRV_HASH_HW_SHA1,
1879 .inter_digestsize = SHA1_DIGEST_SIZE,
1883 .driver_name = "sha256-dx",
1884 .mac_name = "hmac(sha256)",
1885 .mac_driver_name = "hmac-sha256-dx",
1886 .blocksize = SHA256_BLOCK_SIZE,
1888 .init = ssi_ahash_init,
1889 .update = ssi_ahash_update,
1890 .final = ssi_ahash_final,
1891 .finup = ssi_ahash_finup,
1892 .digest = ssi_ahash_digest,
1893 .export = ssi_ahash_export,
1894 .import = ssi_ahash_import,
1895 .setkey = ssi_ahash_setkey,
1897 .digestsize = SHA256_DIGEST_SIZE,
1898 .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE)
1901 .hash_mode = DRV_HASH_SHA256,
1902 .hw_mode = DRV_HASH_HW_SHA256,
1903 .inter_digestsize = SHA256_DIGEST_SIZE,
1907 .driver_name = "sha224-dx",
1908 .mac_name = "hmac(sha224)",
1909 .mac_driver_name = "hmac-sha224-dx",
1910 .blocksize = SHA224_BLOCK_SIZE,
1912 .init = ssi_ahash_init,
1913 .update = ssi_ahash_update,
1914 .final = ssi_ahash_final,
1915 .finup = ssi_ahash_finup,
1916 .digest = ssi_ahash_digest,
1917 .export = ssi_ahash_export,
1918 .import = ssi_ahash_import,
1919 .setkey = ssi_ahash_setkey,
1921 .digestsize = SHA224_DIGEST_SIZE,
1922 .statesize = CC_STATE_SIZE(SHA224_DIGEST_SIZE),
1925 .hash_mode = DRV_HASH_SHA224,
/* SHA-224 runs on the SHA-256 engine with a 256-bit internal state. */
1926 .hw_mode = DRV_HASH_HW_SHA256,
1927 .inter_digestsize = SHA256_DIGEST_SIZE,
1929 #if (DX_DEV_SHA_MAX > 256)
1932 .driver_name = "sha384-dx",
1933 .mac_name = "hmac(sha384)",
1934 .mac_driver_name = "hmac-sha384-dx",
1935 .blocksize = SHA384_BLOCK_SIZE,
1937 .init = ssi_ahash_init,
1938 .update = ssi_ahash_update,
1939 .final = ssi_ahash_final,
1940 .finup = ssi_ahash_finup,
1941 .digest = ssi_ahash_digest,
1942 .export = ssi_ahash_export,
1943 .import = ssi_ahash_import,
1944 .setkey = ssi_ahash_setkey,
1946 .digestsize = SHA384_DIGEST_SIZE,
1947 .statesize = CC_STATE_SIZE(SHA384_DIGEST_SIZE),
1950 .hash_mode = DRV_HASH_SHA384,
/* SHA-384 runs on the SHA-512 engine with a 512-bit internal state. */
1951 .hw_mode = DRV_HASH_HW_SHA512,
1952 .inter_digestsize = SHA512_DIGEST_SIZE,
1956 .driver_name = "sha512-dx",
1957 .mac_name = "hmac(sha512)",
1958 .mac_driver_name = "hmac-sha512-dx",
1959 .blocksize = SHA512_BLOCK_SIZE,
1961 .init = ssi_ahash_init,
1962 .update = ssi_ahash_update,
1963 .final = ssi_ahash_final,
1964 .finup = ssi_ahash_finup,
1965 .digest = ssi_ahash_digest,
1966 .export = ssi_ahash_export,
1967 .import = ssi_ahash_import,
1968 .setkey = ssi_ahash_setkey,
1970 .digestsize = SHA512_DIGEST_SIZE,
1971 .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
1974 .hash_mode = DRV_HASH_SHA512,
1975 .hw_mode = DRV_HASH_HW_SHA512,
1976 .inter_digestsize = SHA512_DIGEST_SIZE,
1981 .driver_name = "md5-dx",
1982 .mac_name = "hmac(md5)",
1983 .mac_driver_name = "hmac-md5-dx",
1984 .blocksize = MD5_HMAC_BLOCK_SIZE,
1986 .init = ssi_ahash_init,
1987 .update = ssi_ahash_update,
1988 .final = ssi_ahash_final,
1989 .finup = ssi_ahash_finup,
1990 .digest = ssi_ahash_digest,
1991 .export = ssi_ahash_export,
1992 .import = ssi_ahash_import,
1993 .setkey = ssi_ahash_setkey,
1995 .digestsize = MD5_DIGEST_SIZE,
1996 .statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
1999 .hash_mode = DRV_HASH_MD5,
2000 .hw_mode = DRV_HASH_HW_MD5,
2001 .inter_digestsize = MD5_DIGEST_SIZE,
/* MAC-only entry: no plain-hash name; uses the ssi_mac_* handlers. */
2004 .mac_name = "xcbc(aes)",
2005 .mac_driver_name = "xcbc-aes-dx",
2006 .blocksize = AES_BLOCK_SIZE,
2008 .init = ssi_ahash_init,
2009 .update = ssi_mac_update,
2010 .final = ssi_mac_final,
2011 .finup = ssi_mac_finup,
2012 .digest = ssi_mac_digest,
2013 .setkey = ssi_xcbc_setkey,
2014 .export = ssi_ahash_export,
2015 .import = ssi_ahash_import,
2017 .digestsize = AES_BLOCK_SIZE,
2018 .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
2021 .hash_mode = DRV_HASH_NULL,
2022 .hw_mode = DRV_CIPHER_XCBC_MAC,
2023 .inter_digestsize = AES_BLOCK_SIZE,
/* MAC-only entry: no plain-hash name; uses the ssi_mac_* handlers. */
2027 .mac_name = "cmac(aes)",
2028 .mac_driver_name = "cmac-aes-dx",
2029 .blocksize = AES_BLOCK_SIZE,
2031 .init = ssi_ahash_init,
2032 .update = ssi_mac_update,
2033 .final = ssi_mac_final,
2034 .finup = ssi_mac_finup,
2035 .digest = ssi_mac_digest,
2036 .setkey = ssi_cmac_setkey,
2037 .export = ssi_ahash_export,
2038 .import = ssi_ahash_import,
2040 .digestsize = AES_BLOCK_SIZE,
2041 .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
2044 .hash_mode = DRV_HASH_NULL,
2045 .hw_mode = DRV_CIPHER_CMAC,
2046 .inter_digestsize = AES_BLOCK_SIZE,
/*
 * ssi_hash_create_alg() - build a registrable ssi_hash_alg from one
 * driver_hash[] template.
 *
 * Allocates the wrapper, copies the ahash template, and fills in the
 * generic crypto_alg fields. The names come either from the keyed
 * (mac_name/mac_driver_name) or plain (name/driver_name) pair —
 * presumably selected by a keyed/non-keyed flag whose branch lines fall
 * outside this excerpt; for the plain variant halg->setkey is cleared.
 * Returns the new object or ERR_PTR(-ENOMEM); caller owns the memory.
 */
2052 static struct ssi_hash_alg *
2053 ssi_hash_create_alg(struct ssi_hash_template *template, struct device *dev,
2056 struct ssi_hash_alg *t_crypto_alg;
2057 struct crypto_alg *alg;
2058 struct ahash_alg *halg;
2060 t_crypto_alg = kzalloc(sizeof(*t_crypto_alg), GFP_KERNEL);
2062 return ERR_PTR(-ENOMEM);
2064 t_crypto_alg->ahash_alg = template->template_ahash;
2065 halg = &t_crypto_alg->ahash_alg;
2066 alg = &halg->halg.base;
/* Keyed variant: register under the MAC names. */
2069 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
2070 template->mac_name);
2071 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2072 template->mac_driver_name);
/* Plain variant: no setkey, register under the hash names. */
2074 halg->setkey = NULL;
2075 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
2077 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2078 template->driver_name);
2080 alg->cra_module = THIS_MODULE;
2081 alg->cra_ctxsize = sizeof(struct ssi_hash_ctx);
2082 alg->cra_priority = SSI_CRA_PRIO;
2083 alg->cra_blocksize = template->blocksize;
2084 alg->cra_alignmask = 0;
2085 alg->cra_exit = ssi_hash_cra_exit;
2087 alg->cra_init = ssi_ahash_cra_init;
2088 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH |
2089 CRYPTO_ALG_KERN_DRIVER_ONLY;
2090 alg->cra_type = &crypto_ahash_type;
2092 t_crypto_alg->hash_mode = template->hash_mode;
2093 t_crypto_alg->hw_mode = template->hw_mode;
2094 t_crypto_alg->inter_digestsize = template->inter_digestsize;
2096 return t_crypto_alg;
/*
 * ssi_hash_init_sram_digest_consts() - copy the constant hash tables
 * into CryptoCell SRAM at device init.
 *
 * Starting at hash_handle->digest_len_sram_addr, writes in order: the
 * generic digest-length constant, the SHA-384/512 digest-length
 * constant (wide-hash HW only), then the larval (initial) digests for
 * MD5/SHA-1/SHA-224/SHA-256 and, on wide-hash HW, SHA-384/SHA-512.
 * Each copy is issued as a descriptor sequence via send_request_init()
 * and the SRAM offset is advanced by the table size. The start of the
 * larval area is recorded in larval_digest_sram_addr for use by the
 * request-building code. Returns 0 or the first send failure.
 */
2099 int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
2101 struct ssi_hash_handle *hash_handle = drvdata->hash_handle;
2102 ssi_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
2103 unsigned int larval_seq_len = 0;
2104 struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
2105 struct device *dev = drvdata_to_dev(drvdata);
2107 #if (DX_DEV_SHA_MAX > 256)
2111 /* Copy-to-sram digest-len */
2112 cc_set_sram_desc(digest_len_init, sram_buff_ofs,
2113 ARRAY_SIZE(digest_len_init), larval_seq,
2115 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2117 goto init_digest_const_err;
2119 sram_buff_ofs += sizeof(digest_len_init);
2122 #if (DX_DEV_SHA_MAX > 256)
2123 /* Copy-to-sram digest-len for sha384/512 */
2124 cc_set_sram_desc(digest_len_sha512_init, sram_buff_ofs,
2125 ARRAY_SIZE(digest_len_sha512_init),
2126 larval_seq, &larval_seq_len);
2127 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2129 goto init_digest_const_err;
2131 sram_buff_ofs += sizeof(digest_len_sha512_init);
2135 /* The initial digests offset */
2136 hash_handle->larval_digest_sram_addr = sram_buff_ofs;
2138 /* Copy-to-sram initial SHA* digests */
2139 cc_set_sram_desc(md5_init, sram_buff_ofs,
2140 ARRAY_SIZE(md5_init), larval_seq,
2142 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2144 goto init_digest_const_err;
2145 sram_buff_ofs += sizeof(md5_init);
2148 cc_set_sram_desc(sha1_init, sram_buff_ofs,
2149 ARRAY_SIZE(sha1_init), larval_seq,
2151 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2153 goto init_digest_const_err;
2154 sram_buff_ofs += sizeof(sha1_init);
2157 cc_set_sram_desc(sha224_init, sram_buff_ofs,
2158 ARRAY_SIZE(sha224_init), larval_seq,
2160 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2162 goto init_digest_const_err;
2163 sram_buff_ofs += sizeof(sha224_init);
2166 cc_set_sram_desc(sha256_init, sram_buff_ofs,
2167 ARRAY_SIZE(sha256_init), larval_seq,
2169 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2171 goto init_digest_const_err;
2172 sram_buff_ofs += sizeof(sha256_init);
2175 #if (DX_DEV_SHA_MAX > 256)
2176 /* We are forced to swap each double-word larval before copying to sram */
/* SHA-384 larvals are 64-bit values: copy high word then low word. */
2177 for (i = 0; i < ARRAY_SIZE(sha384_init); i++) {
2178 const u32 const0 = ((u32 *)((u64 *)&sha384_init[i]))[1];
2179 const u32 const1 = ((u32 *)((u64 *)&sha384_init[i]))[0];
2181 cc_set_sram_desc(&const0, sram_buff_ofs, 1, larval_seq,
2183 sram_buff_ofs += sizeof(u32);
2184 cc_set_sram_desc(&const1, sram_buff_ofs, 1, larval_seq,
2186 sram_buff_ofs += sizeof(u32);
2188 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2190 dev_err(dev, "send_request() failed (rc = %d)\n", rc);
2191 goto init_digest_const_err;
/* Same word-swapped copy for the SHA-512 larval digests. */
2195 for (i = 0; i < ARRAY_SIZE(sha512_init); i++) {
2196 const u32 const0 = ((u32 *)((u64 *)&sha512_init[i]))[1];
2197 const u32 const1 = ((u32 *)((u64 *)&sha512_init[i]))[0];
2199 cc_set_sram_desc(&const0, sram_buff_ofs, 1, larval_seq,
2201 sram_buff_ofs += sizeof(u32);
2202 cc_set_sram_desc(&const1, sram_buff_ofs, 1, larval_seq,
2204 sram_buff_ofs += sizeof(u32);
2206 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2208 dev_err(dev, "send_request() failed (rc = %d)\n", rc);
2209 goto init_digest_const_err;
2213 init_digest_const_err:
2217 int ssi_hash_alloc(struct ssi_drvdata *drvdata)
2219 struct ssi_hash_handle *hash_handle;
2220 ssi_sram_addr_t sram_buff;
2221 u32 sram_size_to_alloc;
2222 struct device *dev = drvdata_to_dev(drvdata);
2226 hash_handle = kzalloc(sizeof(*hash_handle), GFP_KERNEL);
2230 INIT_LIST_HEAD(&hash_handle->hash_list);
2231 drvdata->hash_handle = hash_handle;
2233 sram_size_to_alloc = sizeof(digest_len_init) +
2234 #if (DX_DEV_SHA_MAX > 256)
2235 sizeof(digest_len_sha512_init) +
2236 sizeof(sha384_init) +
2237 sizeof(sha512_init) +
2241 sizeof(sha224_init) +
2242 sizeof(sha256_init);
2244 sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
2245 if (sram_buff == NULL_SRAM_ADDR) {
2246 dev_err(dev, "SRAM pool exhausted\n");
2251 /* The initial digest-len offset */
2252 hash_handle->digest_len_sram_addr = sram_buff;
2254 /*must be set before the alg registration as it is being used there*/
2255 rc = ssi_hash_init_sram_digest_consts(drvdata);
2257 dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
2261 /* ahash registration */
2262 for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
2263 struct ssi_hash_alg *t_alg;
2264 int hw_mode = driver_hash[alg].hw_mode;
2266 /* register hmac version */
2267 t_alg = ssi_hash_create_alg(&driver_hash[alg], dev, true);
2268 if (IS_ERR(t_alg)) {
2269 rc = PTR_ERR(t_alg);
2270 dev_err(dev, "%s alg allocation failed\n",
2271 driver_hash[alg].driver_name);
2274 t_alg->drvdata = drvdata;
2276 rc = crypto_register_ahash(&t_alg->ahash_alg);
2278 dev_err(dev, "%s alg registration failed\n",
2279 driver_hash[alg].driver_name);
2283 list_add_tail(&t_alg->entry,
2284 &hash_handle->hash_list);
2287 if (hw_mode == DRV_CIPHER_XCBC_MAC ||
2288 hw_mode == DRV_CIPHER_CMAC)
2291 /* register hash version */
2292 t_alg = ssi_hash_create_alg(&driver_hash[alg], dev, false);
2293 if (IS_ERR(t_alg)) {
2294 rc = PTR_ERR(t_alg);
2295 dev_err(dev, "%s alg allocation failed\n",
2296 driver_hash[alg].driver_name);
2299 t_alg->drvdata = drvdata;
2301 rc = crypto_register_ahash(&t_alg->ahash_alg);
2303 dev_err(dev, "%s alg registration failed\n",
2304 driver_hash[alg].driver_name);
2308 list_add_tail(&t_alg->entry, &hash_handle->hash_list);
2315 kfree(drvdata->hash_handle);
2316 drvdata->hash_handle = NULL;
2320 int ssi_hash_free(struct ssi_drvdata *drvdata)
2322 struct ssi_hash_alg *t_hash_alg, *hash_n;
2323 struct ssi_hash_handle *hash_handle = drvdata->hash_handle;
2326 list_for_each_entry_safe(t_hash_alg, hash_n, &hash_handle->hash_list, entry) {
2327 crypto_unregister_ahash(&t_hash_alg->ahash_alg);
2328 list_del(&t_hash_alg->entry);
2333 drvdata->hash_handle = NULL;
2338 static void ssi_hash_create_xcbc_setup(struct ahash_request *areq,
2339 struct cc_hw_desc desc[],
2340 unsigned int *seq_size)
2342 unsigned int idx = *seq_size;
2343 struct ahash_req_ctx *state = ahash_request_ctx(areq);
2344 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2345 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
2347 /* Setup XCBC MAC K1 */
2348 hw_desc_init(&desc[idx]);
2349 set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
2350 XCBC_MAC_K1_OFFSET),
2351 CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2352 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2353 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2354 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2355 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2356 set_flow_mode(&desc[idx], S_DIN_to_AES);
2359 /* Setup XCBC MAC K2 */
2360 hw_desc_init(&desc[idx]);
2361 set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
2362 XCBC_MAC_K2_OFFSET),
2363 CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2364 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
2365 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2366 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2367 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2368 set_flow_mode(&desc[idx], S_DIN_to_AES);
2371 /* Setup XCBC MAC K3 */
2372 hw_desc_init(&desc[idx]);
2373 set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
2374 XCBC_MAC_K3_OFFSET),
2375 CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2376 set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
2377 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2378 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2379 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2380 set_flow_mode(&desc[idx], S_DIN_to_AES);
2383 /* Loading MAC state */
2384 hw_desc_init(&desc[idx]);
2385 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2386 CC_AES_BLOCK_SIZE, NS_BIT);
2387 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2388 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2389 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2390 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2391 set_flow_mode(&desc[idx], S_DIN_to_AES);
2396 static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
2397 struct cc_hw_desc desc[],
2398 unsigned int *seq_size)
2400 unsigned int idx = *seq_size;
2401 struct ahash_req_ctx *state = ahash_request_ctx(areq);
2402 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2403 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
2405 /* Setup CMAC Key */
2406 hw_desc_init(&desc[idx]);
2407 set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
2408 ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
2409 ctx->key_params.keylen), NS_BIT);
2410 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2411 set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2412 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2413 set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2414 set_flow_mode(&desc[idx], S_DIN_to_AES);
2417 /* Load MAC state */
2418 hw_desc_init(&desc[idx]);
2419 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2420 CC_AES_BLOCK_SIZE, NS_BIT);
2421 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2422 set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2423 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2424 set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2425 set_flow_mode(&desc[idx], S_DIN_to_AES);
2430 static void ssi_hash_create_data_desc(struct ahash_req_ctx *areq_ctx,
2431 struct ssi_hash_ctx *ctx,
2432 unsigned int flow_mode,
2433 struct cc_hw_desc desc[],
2434 bool is_not_last_data,
2435 unsigned int *seq_size)
2437 unsigned int idx = *seq_size;
2438 struct device *dev = drvdata_to_dev(ctx->drvdata);
2440 if (likely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_DLLI)) {
2441 hw_desc_init(&desc[idx]);
2442 set_din_type(&desc[idx], DMA_DLLI,
2443 sg_dma_address(areq_ctx->curr_sg),
2444 areq_ctx->curr_sg->length, NS_BIT);
2445 set_flow_mode(&desc[idx], flow_mode);
2448 if (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL) {
2449 dev_dbg(dev, " NULL mode\n");
2450 /* nothing to build */
2454 hw_desc_init(&desc[idx]);
2455 set_din_type(&desc[idx], DMA_DLLI,
2456 areq_ctx->mlli_params.mlli_dma_addr,
2457 areq_ctx->mlli_params.mlli_len, NS_BIT);
2458 set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr,
2459 areq_ctx->mlli_params.mlli_len);
2460 set_flow_mode(&desc[idx], BYPASS);
2463 hw_desc_init(&desc[idx]);
2464 set_din_type(&desc[idx], DMA_MLLI,
2465 ctx->drvdata->mlli_sram_addr,
2466 areq_ctx->mlli_nents, NS_BIT);
2467 set_flow_mode(&desc[idx], flow_mode);
2470 if (is_not_last_data)
2471 set_din_not_last_indication(&desc[(idx - 1)]);
2472 /* return updated desc sequence size */
2477 * Gets the address of the initial digest in SRAM
2478 * according to the given hash mode
2481 * \param mode The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256
2483 * \return u32 The address of the initial digest in SRAM
2485 ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, u32 mode)
2487 struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
2488 struct ssi_hash_handle *hash_handle = _drvdata->hash_handle;
2489 struct device *dev = drvdata_to_dev(_drvdata);
2495 return (hash_handle->larval_digest_sram_addr);
2497 return (hash_handle->larval_digest_sram_addr +
2499 case DRV_HASH_SHA224:
2500 return (hash_handle->larval_digest_sram_addr +
2503 case DRV_HASH_SHA256:
2504 return (hash_handle->larval_digest_sram_addr +
2507 sizeof(sha224_init));
2508 #if (DX_DEV_SHA_MAX > 256)
2509 case DRV_HASH_SHA384:
2510 return (hash_handle->larval_digest_sram_addr +
2513 sizeof(sha224_init) +
2514 sizeof(sha256_init));
2515 case DRV_HASH_SHA512:
2516 return (hash_handle->larval_digest_sram_addr +
2519 sizeof(sha224_init) +
2520 sizeof(sha256_init) +
2521 sizeof(sha384_init));
2524 dev_err(dev, "Invalid hash mode (%d)\n", mode);
2527 /*This is valid wrong value to avoid kernel crash*/
2528 return hash_handle->larval_digest_sram_addr;
2532 ssi_ahash_get_initial_digest_len_sram_addr(void *drvdata, u32 mode)
2534 struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
2535 struct ssi_hash_handle *hash_handle = _drvdata->hash_handle;
2536 ssi_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;
2540 case DRV_HASH_SHA224:
2541 case DRV_HASH_SHA256:
2543 return digest_len_addr;
2544 #if (DX_DEV_SHA_MAX > 256)
2545 case DRV_HASH_SHA384:
2546 case DRV_HASH_SHA512:
2547 return digest_len_addr + sizeof(digest_len_init);
2550 return digest_len_addr; /*to avoid kernel crash*/