// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale i.MX23/i.MX28 Data Co-Processor driver
 *
 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
 */
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/stmp_device.h>
#include <linux/clk.h>

#include <crypto/aes.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#define DCP_MAX_CHANS	4
#define DCP_BUF_SZ	PAGE_SIZE
#define DCP_SHA_PAY_SZ	64

#define DCP_ALIGNMENT	64
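/*
 * Note: DCP_ALIGNMENT is the alignment the engine requires for the coherent
 * bounce block; mxs_dcp_probe() below over-allocates by this amount and
 * PTR_ALIGN()s the result rather than relying on the allocator's natural
 * alignment.
 */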
/*
 * Null hashes to align with hw behavior on imx6sl and ull;
 * these are flipped for consistency with hw output.
 */
static const uint8_t sha1_null_hash[] =
	"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
	"\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";

static const uint8_t sha256_null_hash[] =
	"\x55\xb8\x52\x78\x1b\x99\x95\xa4"
	"\x4c\x93\x9b\x64\xe4\x41\xae\x27"
	"\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
	"\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";
/* DCP DMA descriptor. */
struct dcp_dma_desc {
	uint32_t	next_cmd_addr;
	uint32_t	control0;
	uint32_t	control1;
	uint32_t	source;
	uint32_t	destination;
	uint32_t	size;
	uint32_t	payload;
	uint32_t	status;
};
/* Coherent aligned block for bounce buffering. */
struct dcp_coherent_block {
	uint8_t			aes_in_buf[DCP_BUF_SZ];
	uint8_t			aes_out_buf[DCP_BUF_SZ];
	uint8_t			sha_in_buf[DCP_BUF_SZ];
	uint8_t			sha_out_buf[DCP_SHA_PAY_SZ];

	uint8_t			aes_key[2 * AES_KEYSIZE_128];

	struct dcp_dma_desc	desc[DCP_MAX_CHANS];
};
struct dcp {
	struct device			*dev;
	void __iomem			*base;

	uint32_t			caps;

	struct dcp_coherent_block	*coh;

	struct completion		completion[DCP_MAX_CHANS];
	spinlock_t			lock[DCP_MAX_CHANS];
	struct task_struct		*thread[DCP_MAX_CHANS];
	struct crypto_queue		queue[DCP_MAX_CHANS];
	struct clk			*dcp_clk;
};
enum dcp_chan {
	DCP_CHAN_HASH_SHA	= 0,
	DCP_CHAN_CRYPTO,
};
struct dcp_async_ctx {
	/* Common context */
	enum dcp_chan	chan;
	uint32_t	fill;

	/* SHA Hash-specific context */
	struct mutex			mutex;
	uint32_t			alg;
	int				hot:1;

	/* Crypto-specific context */
	struct crypto_skcipher		*fallback;
	unsigned int			key_len;
	uint8_t				key[AES_KEYSIZE_128];
};
struct dcp_aes_req_ctx {
	int	enc:1;
	int	ecb:1;
	struct skcipher_request fallback_req;	// keep at the end
};
struct dcp_sha_req_ctx {
	int	init:1;
	int	fini:1;
};
struct dcp_export_state {
	struct dcp_sha_req_ctx	req_ctx;
	struct dcp_async_ctx	async_ctx;
};
/*
 * There can be only one instance of the MXS DCP due to the
 * design of the Linux Crypto API.
 */
static struct dcp *global_sdcp;
/* DCP register layout. */
#define MXS_DCP_CTRL				0x00
#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING	(1 << 22)

#define MXS_DCP_STAT				0x10
#define MXS_DCP_STAT_CLR			0x18
#define MXS_DCP_STAT_IRQ_MASK			0xf

#define MXS_DCP_CHANNELCTRL			0x20
#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK	0xff

#define MXS_DCP_CAPABILITY1			0x40
#define MXS_DCP_CAPABILITY1_SHA256		(4 << 16)
#define MXS_DCP_CAPABILITY1_SHA1		(1 << 16)
#define MXS_DCP_CAPABILITY1_AES128		(1 << 0)

#define MXS_DCP_CONTEXT				0x50

#define MXS_DCP_CH_N_CMDPTR(n)			(0x100 + ((n) * 0x40))

#define MXS_DCP_CH_N_SEMA(n)			(0x110 + ((n) * 0x40))

#define MXS_DCP_CH_N_STAT(n)			(0x120 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT_CLR(n)		(0x128 + ((n) * 0x40))

/* DMA descriptor bits. */
#define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
#define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
#define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
#define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
#define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
#define MXS_DCP_CONTROL0_ENABLE_CIPHER		(1 << 5)
#define MXS_DCP_CONTROL0_DECR_SEMAPHORE		(1 << 1)
#define MXS_DCP_CONTROL0_INTERRUPT		(1 << 0)

#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256	(2 << 16)
#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1	(0 << 16)
#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC	(1 << 4)
#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)
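/*
 * Illustrative descriptor setup (a sketch mirroring mxs_dcp_run_aes() below,
 * not an extra code path): a one-shot AES-128-CBC encryption would compose
 * the two control words from the bits above as:
 *
 *	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
 *			 MXS_DCP_CONTROL0_INTERRUPT |
 *			 MXS_DCP_CONTROL0_ENABLE_CIPHER |
 *			 MXS_DCP_CONTROL0_PAYLOAD_KEY |
 *			 MXS_DCP_CONTROL0_CIPHER_ENCRYPT |
 *			 MXS_DCP_CONTROL0_CIPHER_INIT;
 *	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128 |
 *			 MXS_DCP_CONTROL1_CIPHER_MODE_CBC;
 */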
static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
{
	int dma_err = 0;
	struct dcp *sdcp = global_sdcp;
	const int chan = actx->chan;
	uint32_t stat;
	unsigned long ret;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
					      DMA_TO_DEVICE);

	dma_err = dma_mapping_error(sdcp->dev, desc_phys);
	if (dma_err)
		return dma_err;

	reinit_completion(&sdcp->completion[chan]);

	/* Clear status register. */
	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));

	/* Load the DMA descriptor. */
	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));

	/* Increment the semaphore to start the DMA transfer. */
	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));

	ret = wait_for_completion_timeout(&sdcp->completion[chan],
					  msecs_to_jiffies(1000));
	if (!ret) {
		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
			chan, readl(sdcp->base + MXS_DCP_STAT));
		return -ETIMEDOUT;
	}

	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
	if (stat & 0xff) {
		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
			chan, stat);
		return -EINVAL;
	}

	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);

	return 0;
}
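/*
 * Illustrative call pattern (a sketch, mirroring mxs_dcp_run_aes() and
 * mxs_dcp_run_sha() below): callers fill the per-channel descriptor in the
 * coherent block and then kick the channel:
 *
 *	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
 *
 *	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
 *			 MXS_DCP_CONTROL0_INTERRUPT;
 *	desc->source = src_phys;
 *	desc->size = actx->fill;
 *	ret = mxs_dcp_start_dma(actx);
 */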
/*
 * Encryption (AES128)
 */
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
			   struct skcipher_request *req, int init)
{
	dma_addr_t key_phys, src_phys, dst_phys;
	struct dcp *sdcp = global_sdcp;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	int ret;

	key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
				  2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
	ret = dma_mapping_error(sdcp->dev, key_phys);
	if (ret)
		return ret;

	src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
				  DCP_BUF_SZ, DMA_TO_DEVICE);
	ret = dma_mapping_error(sdcp->dev, src_phys);
	if (ret)
		goto err_src;

	dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
				  DCP_BUF_SZ, DMA_FROM_DEVICE);
	ret = dma_mapping_error(sdcp->dev, dst_phys);
	if (ret)
		goto err_dst;

	if (actx->fill % AES_BLOCK_SIZE) {
		dev_err(sdcp->dev, "Invalid block size!\n");
		ret = -EINVAL;
		goto aes_done_run;
	}

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_CIPHER;

	/* Payload contains the key. */
	desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;

	if (rctx->enc)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
	if (init)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;

	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;

	if (rctx->ecb)
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
	else
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;

	desc->next_cmd_addr = 0;
	desc->source = src_phys;
	desc->destination = dst_phys;
	desc->size = actx->fill;
	desc->payload = key_phys;
	desc->status = 0;

	ret = mxs_dcp_start_dma(actx);

aes_done_run:
	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
err_dst:
	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
err_src:
	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
			 DMA_TO_DEVICE);

	return ret;
}
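/*
 * Layout of the PAYLOAD_KEY buffer consumed by the descriptor above (filled
 * in by mxs_dcp_aes_block_crypt() below): bytes 0..15 hold the AES-128 key
 * and bytes 16..31 hold the CBC IV (zeroed for ECB); per the CBC handling in
 * this driver, CIPHER_INIT makes the engine load the IV from that second half.
 */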
static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct skcipher_request *req = skcipher_request_cast(arq);
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);

	struct scatterlist *dst = req->dst;
	struct scatterlist *src = req->src;
	const int nents = sg_nents(req->src);

	const int out_off = DCP_BUF_SZ;
	uint8_t *in_buf = sdcp->coh->aes_in_buf;
	uint8_t *out_buf = sdcp->coh->aes_out_buf;

	uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
	uint32_t dst_off = 0;
	uint32_t last_out_len = 0;

	uint8_t *key = sdcp->coh->aes_key;

	int ret = 0;
	int split = 0;
	unsigned int i, len, clen, rem = 0, tlen = 0;
	int init = 0;
	bool limit_hit = false;

	actx->fill = 0;

	/* Copy the key from the temporary location. */
	memcpy(key, actx->key, actx->key_len);

	if (!rctx->ecb) {
		/* Copy the CBC IV just past the key. */
		memcpy(key + AES_KEYSIZE_128, req->iv, AES_KEYSIZE_128);
		/* CBC needs the INIT set. */
		init = 1;
	} else {
		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
	}

	for_each_sg(req->src, src, nents, i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);
		tlen += len;
		limit_hit = tlen > req->cryptlen;

		if (limit_hit)
			len = req->cryptlen - (tlen - len);

		do {
			if (actx->fill + len > out_off)
				clen = out_off - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer or this is the last SG,
			 * submit the buffer.
			 */
			if (actx->fill == out_off || sg_is_last(src) ||
			    limit_hit) {
				ret = mxs_dcp_run_aes(actx, req, init);
				if (ret)
					return ret;
				init = 0;

				out_tmp = out_buf;
				last_out_len = actx->fill;
				while (dst && actx->fill) {
					if (!split) {
						dst_buf = sg_virt(dst);
						dst_off = 0;
					}
					rem = min(sg_dma_len(dst) - dst_off,
						  actx->fill);

					memcpy(dst_buf + dst_off, out_tmp, rem);
					out_tmp += rem;
					dst_off += rem;
					actx->fill -= rem;

					if (dst_off == sg_dma_len(dst)) {
						dst = sg_next(dst);
						split = 0;
					} else {
						split = 1;
					}
				}
			}
		} while (len);

		if (limit_hit)
			break;
	}

	/* Copy the IV for CBC for chaining */
	if (!rctx->ecb) {
		if (rctx->enc)
			memcpy(req->iv, out_buf+(last_out_len-AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
		else
			memcpy(req->iv, in_buf+(last_out_len-AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
	}

	return ret;
}
static int dcp_chan_thread_aes(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_CRYPTO;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = mxs_dcp_aes_block_crypt(arq);
			arq->complete(arq, ret);
		}
	}

	return 0;
}
static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
				   req->cryptlen, req->iv);

	if (enc)
		ret = crypto_skcipher_encrypt(&rctx->fallback_req);
	else
		ret = crypto_skcipher_decrypt(&rctx->fallback_req);

	return ret;
}
static int mxs_dcp_aes_enqueue(struct skcipher_request *req, int enc, int ecb)
{
	struct dcp *sdcp = global_sdcp;
	struct crypto_async_request *arq = &req->base;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	int ret;

	if (unlikely(actx->key_len != AES_KEYSIZE_128))
		return mxs_dcp_block_fallback(req, enc);

	rctx->enc = enc;
	rctx->ecb = ecb;
	actx->chan = DCP_CHAN_CRYPTO;

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);

	return ret;
}
static int mxs_dcp_aes_ecb_decrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 1);
}

static int mxs_dcp_aes_ecb_encrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 1);
}

static int mxs_dcp_aes_cbc_decrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 0);
}

static int mxs_dcp_aes_cbc_encrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 0);
}
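/*
 * Illustrative in-kernel consumer (a sketch, not part of this driver): once
 * registered, the accelerated transform is reached through the generic
 * crypto API, e.g. one synchronous CBC encryption:
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, sg_src, sg_dst, len, iv);
 *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *
 * "cbc-aes-dcp" is selected when its priority (400) wins; error handling
 * and freeing are omitted for brevity.
 */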
static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			      unsigned int len)
{
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

	/*
	 * AES 128 is supported by the hardware, store key into temporary
	 * buffer and exit. We must use the temporary buffer here, since
	 * there can still be an operation in progress.
	 */
	actx->key_len = len;
	if (len == AES_KEYSIZE_128) {
		memcpy(actx->key, key, len);
		return 0;
	}

	/*
	 * If the requested AES key size is not supported by the hardware,
	 * but is supported by in-kernel software implementation, we use
	 * software fallback.
	 */
	crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(actx->fallback,
				  tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(actx->fallback, key, len);
}
static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *blk;

	blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	actx->fallback = blk;
	crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx) +
					 crypto_skcipher_reqsize(blk));
	return 0;
}
static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm)
{
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(actx->fallback);
}
/*
 * Hashing (SHA1/SHA256)
 */
static int mxs_dcp_run_sha(struct ahash_request *req)
{
	struct dcp *sdcp = global_sdcp;
	int ret;

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t digest_phys = 0;
	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);

	ret = dma_mapping_error(sdcp->dev, buf_phys);
	if (ret)
		return ret;

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_HASH;
	if (rctx->init)
		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;

	desc->control1 = actx->alg;
	desc->next_cmd_addr = 0;
	desc->source = buf_phys;
	desc->destination = 0;
	desc->size = actx->fill;
	desc->payload = 0;
	desc->status = 0;

	/*
	 * Align driver with hw behavior when generating null hashes
	 */
	if (rctx->init && rctx->fini && desc->size == 0) {
		struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
		const uint8_t *sha_buf =
			(actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
			sha1_null_hash : sha256_null_hash;
		memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
		ret = 0;
		goto done_run;
	}

	/* Set HASH_TERM bit for last transfer block. */
	if (rctx->fini) {
		digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
					     DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
		ret = dma_mapping_error(sdcp->dev, digest_phys);
		if (ret)
			goto done_run;

		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
		desc->payload = digest_phys;
	}

	ret = mxs_dcp_start_dma(actx);

	if (rctx->fini)
		dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
				 DMA_FROM_DEVICE);

done_run:
	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);

	return ret;
}
static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ahash_request *req = ahash_request_cast(arq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	uint8_t *in_buf = sdcp->coh->sha_in_buf;
	uint8_t *out_buf = sdcp->coh->sha_out_buf;

	struct scatterlist *src;

	unsigned int i, len, clen, oft = 0;
	int ret;

	int fin = rctx->fini;
	if (fin)
		rctx->fini = 0;

	src = req->src;
	len = req->nbytes;

	while (len) {
		if (actx->fill + len > DCP_BUF_SZ)
			clen = DCP_BUF_SZ - actx->fill;
		else
			clen = len;

		scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen,
					 0);

		len -= clen;
		oft += clen;
		actx->fill += clen;

		/*
		 * If we filled the buffer and still have some
		 * more data, submit the buffer.
		 */
		if (len && actx->fill == DCP_BUF_SZ) {
			ret = mxs_dcp_run_sha(req);
			if (ret)
				return ret;
			actx->fill = 0;
		}
	}

	if (fin) {
		rctx->fini = 1;

		/* Submit whatever is left. */
		if (!req->result)
			return -EINVAL;

		ret = mxs_dcp_run_sha(req);
		if (ret)
			return ret;

		actx->fill = 0;

		/* The hardware emits the digest byte-reversed, so flip it. */
		for (i = 0; i < halg->digestsize; i++)
			req->result[i] = out_buf[halg->digestsize - i - 1];
	}

	return 0;
}
static int dcp_chan_thread_sha(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_HASH_SHA;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = dcp_sha_req_to_buf(arq);
			arq->complete(arq, ret);
		}
	}

	return 0;
}
static int dcp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	/*
	 * Start hashing session. The code below only inits the
	 * hashing session context, nothing more.
	 */
	memset(actx, 0, sizeof(*actx));

	if (strcmp(halg->base.cra_name, "sha1") == 0)
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
	else
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;

	actx->fill = 0;
	actx->hot = 0;
	actx->chan = DCP_CHAN_HASH_SHA;

	mutex_init(&actx->mutex);

	return 0;
}
static int dcp_sha_update_fx(struct ahash_request *req, int fini)
{
	struct dcp *sdcp = global_sdcp;

	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	int ret;

	/*
	 * Ignore requests that have no data in them and are not
	 * the trailing requests in the stream of requests.
	 */
	if (!req->nbytes && !fini)
		return 0;

	mutex_lock(&actx->mutex);

	rctx->fini = fini;

	if (!actx->hot) {
		actx->hot = 1;
		rctx->init = 1;
	}

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);
	mutex_unlock(&actx->mutex);

	return ret;
}
static int dcp_sha_update(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 0);
}

static int dcp_sha_final(struct ahash_request *req)
{
	ahash_request_set_crypt(req, NULL, req->result, 0);
	req->nbytes = 0;
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_finup(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 1);
}
static int dcp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = dcp_sha_init(req);
	if (ret)
		return ret;

	return dcp_sha_finup(req);
}
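/*
 * Illustrative in-kernel consumer (a sketch, not part of this driver): a
 * one-shot digest through the generic ahash API, which selects "sha256-dcp"
 * when it is the highest-priority "sha256" provider:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, sg, digest, len);
 *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *
 * Error handling and freeing are omitted for brevity.
 */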
static int dcp_sha_import(struct ahash_request *req, const void *in)
{
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	const struct dcp_export_state *export = in;

	memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
	memset(actx, 0, sizeof(struct dcp_async_ctx));
	memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
	memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));

	return 0;
}
static int dcp_sha_export(struct ahash_request *req, void *out)
{
	struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
	struct dcp_export_state *export = out;

	memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
	memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));

	return 0;
}
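/*
 * The export/import pair lets a caller snapshot an in-progress hash and
 * resume it later, e.g. (sketch):
 *
 *	struct dcp_export_state state;
 *
 *	crypto_ahash_export(req, &state);
 *	...
 *	crypto_ahash_import(req2, &state);
 *
 * Note the exported state deliberately covers both the request context and
 * the transform context, since this driver keeps hash state in both.
 */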
static int dcp_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct dcp_sha_req_ctx));
	return 0;
}

static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
{
}
/* AES 128 ECB and AES 128 CBC */
static struct skcipher_alg dcp_aes_algs[] = {
	{
		.base.cra_name		= "ecb(aes)",
		.base.cra_driver_name	= "ecb-aes-dcp",
		.base.cra_priority	= 400,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= mxs_dcp_aes_setkey,
		.encrypt		= mxs_dcp_aes_ecb_encrypt,
		.decrypt		= mxs_dcp_aes_ecb_decrypt,
		.init			= mxs_dcp_aes_fallback_init_tfm,
		.exit			= mxs_dcp_aes_fallback_exit_tfm,
	}, {
		.base.cra_name		= "cbc(aes)",
		.base.cra_driver_name	= "cbc-aes-dcp",
		.base.cra_priority	= 400,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= mxs_dcp_aes_setkey,
		.encrypt		= mxs_dcp_aes_cbc_encrypt,
		.decrypt		= mxs_dcp_aes_cbc_decrypt,
		.ivsize			= AES_BLOCK_SIZE,
		.init			= mxs_dcp_aes_fallback_init_tfm,
		.exit			= mxs_dcp_aes_fallback_exit_tfm,
	},
};
/* SHA1 */
static struct ahash_alg dcp_sha1_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA1_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha1",
			.cra_driver_name	= "sha1-dcp",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};
/* SHA256 */
static struct ahash_alg dcp_sha256_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA256_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256-dcp",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};
static irqreturn_t mxs_dcp_irq(int irq, void *context)
{
	struct dcp *sdcp = context;
	uint32_t stat;
	int i;

	stat = readl(sdcp->base + MXS_DCP_STAT);
	stat &= MXS_DCP_STAT_IRQ_MASK;
	if (!stat)
		return IRQ_NONE;

	/* Clear the interrupts. */
	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);

	/* Complete the DMA requests that finished. */
	for (i = 0; i < DCP_MAX_CHANS; i++)
		if (stat & (1 << i))
			complete(&sdcp->completion[i]);

	return IRQ_HANDLED;
}
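/*
 * Each low bit of MXS_DCP_STAT corresponds to one channel; completing the
 * matching per-channel completion wakes the waiter in mxs_dcp_start_dma()
 * above.
 */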
static int mxs_dcp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dcp *sdcp = NULL;
	int i, ret;
	int dcp_vmi_irq, dcp_irq;

	if (global_sdcp) {
		dev_err(dev, "Only one DCP instance allowed!\n");
		return -ENODEV;
	}

	dcp_vmi_irq = platform_get_irq(pdev, 0);
	if (dcp_vmi_irq < 0)
		return dcp_vmi_irq;

	dcp_irq = platform_get_irq(pdev, 1);
	if (dcp_irq < 0)
		return dcp_irq;

	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
	if (!sdcp)
		return -ENOMEM;

	sdcp->dev = dev;
	sdcp->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(sdcp->base))
		return PTR_ERR(sdcp->base);
	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
			       "dcp-vmi-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
		return ret;
	}

	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
			       "dcp-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP IRQ!\n");
		return ret;
	}

	/* Allocate coherent helper block. */
	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
				 GFP_KERNEL);
	if (!sdcp->coh)
		return -ENOMEM;

	/* Re-align the structure so it fits the DCP constraints. */
	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);
	/* DCP clock is optional, only used on some SoCs */
	sdcp->dcp_clk = devm_clk_get(dev, "dcp");
	if (IS_ERR(sdcp->dcp_clk)) {
		if (sdcp->dcp_clk != ERR_PTR(-ENOENT))
			return PTR_ERR(sdcp->dcp_clk);
		sdcp->dcp_clk = NULL;
	}
	ret = clk_prepare_enable(sdcp->dcp_clk);
	if (ret)
		return ret;

	/* Restart the DCP block. */
	ret = stmp_reset_block(sdcp->base);
	if (ret) {
		dev_err(dev, "Failed reset\n");
		goto err_disable_unprepare_clk;
	}
	/* Initialize control register. */
	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
	       MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
	       sdcp->base + MXS_DCP_CTRL);

	/* Enable all DCP DMA channels. */
	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
	       sdcp->base + MXS_DCP_CHANNELCTRL);

	/*
	 * We do not enable context switching. Give the context buffer a
	 * pointer to an illegal address so if context switching is
	 * inadvertently enabled, the DCP will return an error instead of
	 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
	 * address will do.
	 */
	writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
	for (i = 0; i < DCP_MAX_CHANS; i++)
		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
	writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);
	global_sdcp = sdcp;

	platform_set_drvdata(pdev, sdcp);

	for (i = 0; i < DCP_MAX_CHANS; i++) {
		spin_lock_init(&sdcp->lock[i]);
		init_completion(&sdcp->completion[i]);
		crypto_init_queue(&sdcp->queue[i], 50);
	}
	/* Create the SHA and AES handler threads. */
	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
						      NULL, "mxs_dcp_chan/sha");
	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
		dev_err(dev, "Error starting SHA thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
		goto err_disable_unprepare_clk;
	}

	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
						    NULL, "mxs_dcp_chan/aes");
	if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
		dev_err(dev, "Error starting AES thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
		goto err_destroy_sha_thread;
	}
	/* Register the various crypto algorithms. */
	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
		ret = crypto_register_skciphers(dcp_aes_algs,
						ARRAY_SIZE(dcp_aes_algs));
		if (ret) {
			/* Failed to register algorithm. */
			dev_err(dev, "Failed to register AES crypto!\n");
			goto err_destroy_aes_thread;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
		ret = crypto_register_ahash(&dcp_sha1_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha1_alg.halg.base.cra_name);
			goto err_unregister_aes;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
		ret = crypto_register_ahash(&dcp_sha256_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha256_alg.halg.base.cra_name);
			goto err_unregister_sha1;
		}
	}

	return 0;
err_unregister_sha1:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

err_unregister_aes:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

err_destroy_aes_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

err_destroy_sha_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);

err_disable_unprepare_clk:
	clk_disable_unprepare(sdcp->dcp_clk);

	return ret;
}
static int mxs_dcp_remove(struct platform_device *pdev)
{
	struct dcp *sdcp = platform_get_drvdata(pdev);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
		crypto_unregister_ahash(&dcp_sha256_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

	clk_disable_unprepare(sdcp->dcp_clk);

	platform_set_drvdata(pdev, NULL);

	global_sdcp = NULL;

	return 0;
}
static const struct of_device_id mxs_dcp_dt_ids[] = {
	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);

static struct platform_driver mxs_dcp_driver = {
	.probe	= mxs_dcp_probe,
	.remove	= mxs_dcp_remove,
	.driver	= {
		.name		= "mxs-dcp",
		.of_match_table	= mxs_dcp_dt_ids,
	},
};

module_platform_driver(mxs_dcp_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("Freescale MXS DCP Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-dcp");