1 // SPDX-License-Identifier: GPL-2.0-only
5 * Support for OMAP SHA1/MD5 HW acceleration.
7 * Copyright (c) 2010 Nokia Corporation
8 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
9 * Copyright (c) 2011 Texas Instruments Incorporated
11 * Some ideas are from old omap-sha1-md5.c driver.
14 #define pr_fmt(fmt) "%s: " fmt, __func__
16 #include <linux/err.h>
17 #include <linux/device.h>
18 #include <linux/module.h>
19 #include <linux/init.h>
20 #include <linux/errno.h>
21 #include <linux/interrupt.h>
22 #include <linux/kernel.h>
23 #include <linux/irq.h>
25 #include <linux/platform_device.h>
26 #include <linux/scatterlist.h>
27 #include <linux/dma-mapping.h>
28 #include <linux/dmaengine.h>
29 #include <linux/pm_runtime.h>
31 #include <linux/of_device.h>
32 #include <linux/of_address.h>
33 #include <linux/of_irq.h>
34 #include <linux/delay.h>
35 #include <linux/crypto.h>
36 #include <crypto/scatterwalk.h>
37 #include <crypto/algapi.h>
38 #include <crypto/sha.h>
39 #include <crypto/hash.h>
40 #include <crypto/hmac.h>
41 #include <crypto/internal/hash.h>
43 #define MD5_DIGEST_SIZE 16
45 #define SHA_REG_IDIGEST(dd, x) ((dd)->pdata->idigest_ofs + ((x)*0x04))
46 #define SHA_REG_DIN(dd, x) ((dd)->pdata->din_ofs + ((x) * 0x04))
47 #define SHA_REG_DIGCNT(dd) ((dd)->pdata->digcnt_ofs)
49 #define SHA_REG_ODIGEST(dd, x) ((dd)->pdata->odigest_ofs + ((x) * 0x04))
51 #define SHA_REG_CTRL 0x18
52 #define SHA_REG_CTRL_LENGTH (0xFFFFFFFF << 5)
53 #define SHA_REG_CTRL_CLOSE_HASH (1 << 4)
54 #define SHA_REG_CTRL_ALGO_CONST (1 << 3)
55 #define SHA_REG_CTRL_ALGO (1 << 2)
56 #define SHA_REG_CTRL_INPUT_READY (1 << 1)
57 #define SHA_REG_CTRL_OUTPUT_READY (1 << 0)
59 #define SHA_REG_REV(dd) ((dd)->pdata->rev_ofs)
61 #define SHA_REG_MASK(dd) ((dd)->pdata->mask_ofs)
62 #define SHA_REG_MASK_DMA_EN (1 << 3)
63 #define SHA_REG_MASK_IT_EN (1 << 2)
64 #define SHA_REG_MASK_SOFTRESET (1 << 1)
65 #define SHA_REG_AUTOIDLE (1 << 0)
67 #define SHA_REG_SYSSTATUS(dd) ((dd)->pdata->sysstatus_ofs)
68 #define SHA_REG_SYSSTATUS_RESETDONE (1 << 0)
70 #define SHA_REG_MODE(dd) ((dd)->pdata->mode_ofs)
71 #define SHA_REG_MODE_HMAC_OUTER_HASH (1 << 7)
72 #define SHA_REG_MODE_HMAC_KEY_PROC (1 << 5)
73 #define SHA_REG_MODE_CLOSE_HASH (1 << 4)
74 #define SHA_REG_MODE_ALGO_CONSTANT (1 << 3)
76 #define SHA_REG_MODE_ALGO_MASK (7 << 0)
77 #define SHA_REG_MODE_ALGO_MD5_128 (0 << 1)
78 #define SHA_REG_MODE_ALGO_SHA1_160 (1 << 1)
79 #define SHA_REG_MODE_ALGO_SHA2_224 (2 << 1)
80 #define SHA_REG_MODE_ALGO_SHA2_256 (3 << 1)
81 #define SHA_REG_MODE_ALGO_SHA2_384 (1 << 0)
82 #define SHA_REG_MODE_ALGO_SHA2_512 (3 << 0)
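/*
 * Note on the 3-bit ALGO field above: MD5/SHA1/SHA224/SHA256 are encoded
 * in bits [2:1] (field values 0, 2, 4, 6), while the odd values 1 and 3
 * select SHA384 and SHA512 on the IP revisions that support them.
 */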
84 #define SHA_REG_LENGTH(dd) ((dd)->pdata->length_ofs)
86 #define SHA_REG_IRQSTATUS 0x118
87 #define SHA_REG_IRQSTATUS_CTX_RDY (1 << 3)
88 #define SHA_REG_IRQSTATUS_PARTHASH_RDY (1 << 2)
89 #define SHA_REG_IRQSTATUS_INPUT_RDY (1 << 1)
90 #define SHA_REG_IRQSTATUS_OUTPUT_RDY (1 << 0)
92 #define SHA_REG_IRQENA 0x11C
93 #define SHA_REG_IRQENA_CTX_RDY (1 << 3)
94 #define SHA_REG_IRQENA_PARTHASH_RDY (1 << 2)
95 #define SHA_REG_IRQENA_INPUT_RDY (1 << 1)
96 #define SHA_REG_IRQENA_OUTPUT_RDY (1 << 0)
98 #define DEFAULT_TIMEOUT_INTERVAL HZ
100 #define DEFAULT_AUTOSUSPEND_DELAY 1000
102 /* mostly device flags */
104 #define FLAGS_FINAL 1
105 #define FLAGS_DMA_ACTIVE 2
106 #define FLAGS_OUTPUT_READY 3
109 #define FLAGS_DMA_READY 6
110 #define FLAGS_AUTO_XOR 7
111 #define FLAGS_BE32_SHA1 8
112 #define FLAGS_SGS_COPIED 9
113 #define FLAGS_SGS_ALLOCED 10
114 #define FLAGS_HUGE 11
117 #define FLAGS_FINUP 16
119 #define FLAGS_MODE_SHIFT 18
120 #define FLAGS_MODE_MASK (SHA_REG_MODE_ALGO_MASK << FLAGS_MODE_SHIFT)
121 #define FLAGS_MODE_MD5 (SHA_REG_MODE_ALGO_MD5_128 << FLAGS_MODE_SHIFT)
122 #define FLAGS_MODE_SHA1 (SHA_REG_MODE_ALGO_SHA1_160 << FLAGS_MODE_SHIFT)
123 #define FLAGS_MODE_SHA224 (SHA_REG_MODE_ALGO_SHA2_224 << FLAGS_MODE_SHIFT)
124 #define FLAGS_MODE_SHA256 (SHA_REG_MODE_ALGO_SHA2_256 << FLAGS_MODE_SHIFT)
125 #define FLAGS_MODE_SHA384 (SHA_REG_MODE_ALGO_SHA2_384 << FLAGS_MODE_SHIFT)
126 #define FLAGS_MODE_SHA512 (SHA_REG_MODE_ALGO_SHA2_512 << FLAGS_MODE_SHIFT)
128 #define FLAGS_HMAC 21
129 #define FLAGS_ERROR 22
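/*
 * The FLAGS_* values above are bit numbers used with set_bit()/test_bit()
 * on dd->flags and ctx->flags, while the FLAGS_MODE_* values are the
 * hardware ALGO encodings shifted up by FLAGS_MODE_SHIFT so the selected
 * algorithm can be kept in the same flags word.
 */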
134 #define OMAP_ALIGN_MASK (sizeof(u32)-1)
135 #define OMAP_ALIGNED __attribute__((aligned(sizeof(u32))))
137 #define BUFLEN SHA512_BLOCK_SIZE
138 #define OMAP_SHA_DMA_THRESHOLD 256
140 #define OMAP_SHA_MAX_DMA_LEN (1024 * 2048)
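/*
 * Rough meaning of the two limits above (as used later in this file):
 * requests below the DMA threshold are handled by the CPU or the software
 * fallback, and anything larger than OMAP_SHA_MAX_DMA_LEN is split into
 * multiple passes via the FLAGS_HUGE handling.
 */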
142 struct omap_sham_dev;
144 struct omap_sham_reqctx {
145 struct omap_sham_dev *dd;
149 u8 digest[SHA512_DIGEST_SIZE] OMAP_ALIGNED;
155 struct scatterlist *sg;
156 struct scatterlist sgl[2];
157 int offset; /* offset in current sg */
159 unsigned int total; /* total request */
161 u8 buffer[] OMAP_ALIGNED;
164 struct omap_sham_hmac_ctx {
165 struct crypto_shash *shash;
166 u8 ipad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
167 u8 opad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
170 struct omap_sham_ctx {
174 struct crypto_shash *fallback;
176 struct omap_sham_hmac_ctx base[];
179 #define OMAP_SHAM_QUEUE_LENGTH 10
181 struct omap_sham_algs_info {
182 struct ahash_alg *algs_list;
184 unsigned int registered;
187 struct omap_sham_pdata {
188 struct omap_sham_algs_info *algs_info;
189 unsigned int algs_info_size;
193 void (*copy_hash)(struct ahash_request *req, int out);
194 void (*write_ctrl)(struct omap_sham_dev *dd, size_t length,
196 void (*trigger)(struct omap_sham_dev *dd, size_t length);
197 int (*poll_irq)(struct omap_sham_dev *dd);
198 irqreturn_t (*intr_hdlr)(int irq, void *dev_id);
216 struct omap_sham_dev {
217 struct list_head list;
218 unsigned long phys_base;
220 void __iomem *io_base;
224 struct dma_chan *dma_lch;
225 struct tasklet_struct done_task;
227 u8 xmit_buf[BUFLEN] OMAP_ALIGNED;
231 struct crypto_queue queue;
232 struct ahash_request *req;
234 const struct omap_sham_pdata *pdata;
237 struct omap_sham_drv {
238 struct list_head dev_list;
243 static struct omap_sham_drv sham = {
244 .dev_list = LIST_HEAD_INIT(sham.dev_list),
245 .lock = __SPIN_LOCK_UNLOCKED(sham.lock),
248 static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
250 return __raw_readl(dd->io_base + offset);
253 static inline void omap_sham_write(struct omap_sham_dev *dd,
254 u32 offset, u32 value)
256 __raw_writel(value, dd->io_base + offset);
259 static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
264 val = omap_sham_read(dd, address);
267 omap_sham_write(dd, address, val);
270 static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
272 unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;
274 while (!(omap_sham_read(dd, offset) & bit)) {
275 if (time_is_before_jiffies(timeout))
282 static void omap_sham_copy_hash_omap2(struct ahash_request *req, int out)
284 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
285 struct omap_sham_dev *dd = ctx->dd;
286 u32 *hash = (u32 *)ctx->digest;
289 for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
291 hash[i] = omap_sham_read(dd, SHA_REG_IDIGEST(dd, i));
293 omap_sham_write(dd, SHA_REG_IDIGEST(dd, i), hash[i]);
297 static void omap_sham_copy_hash_omap4(struct ahash_request *req, int out)
299 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
300 struct omap_sham_dev *dd = ctx->dd;
303 if (ctx->flags & BIT(FLAGS_HMAC)) {
304 struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
305 struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
306 struct omap_sham_hmac_ctx *bctx = tctx->base;
307 u32 *opad = (u32 *)bctx->opad;
309 for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
311 opad[i] = omap_sham_read(dd,
312 SHA_REG_ODIGEST(dd, i));
314 omap_sham_write(dd, SHA_REG_ODIGEST(dd, i),
319 omap_sham_copy_hash_omap2(req, out);
322 static void omap_sham_copy_ready_hash(struct ahash_request *req)
324 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
325 u32 *in = (u32 *)ctx->digest;
326 u32 *hash = (u32 *)req->result;
327 int i, d, big_endian = 0;
332 switch (ctx->flags & FLAGS_MODE_MASK) {
334 d = MD5_DIGEST_SIZE / sizeof(u32);
336 case FLAGS_MODE_SHA1:
337 /* OMAP2 SHA1 is big endian */
338 if (test_bit(FLAGS_BE32_SHA1, &ctx->dd->flags))
340 d = SHA1_DIGEST_SIZE / sizeof(u32);
342 case FLAGS_MODE_SHA224:
343 d = SHA224_DIGEST_SIZE / sizeof(u32);
345 case FLAGS_MODE_SHA256:
346 d = SHA256_DIGEST_SIZE / sizeof(u32);
348 case FLAGS_MODE_SHA384:
349 d = SHA384_DIGEST_SIZE / sizeof(u32);
351 case FLAGS_MODE_SHA512:
352 d = SHA512_DIGEST_SIZE / sizeof(u32);
359 for (i = 0; i < d; i++)
360 hash[i] = be32_to_cpu(in[i]);
362 for (i = 0; i < d; i++)
363 hash[i] = le32_to_cpu(in[i]);
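/*
 * The hardware exposes the digest as an array of 32-bit words; on OMAP2
 * the SHA1 result is big endian (FLAGS_BE32_SHA1), so the code above picks
 * the matching byte-order conversion before copying into req->result.
 */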
366 static int omap_sham_hw_init(struct omap_sham_dev *dd)
370 err = pm_runtime_get_sync(dd->dev);
372 dev_err(dd->dev, "failed to get sync: %d\n", err);
376 if (!test_bit(FLAGS_INIT, &dd->flags)) {
377 set_bit(FLAGS_INIT, &dd->flags);
384 static void omap_sham_write_ctrl_omap2(struct omap_sham_dev *dd, size_t length,
387 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
388 u32 val = length << 5, mask;
390 if (likely(ctx->digcnt))
391 omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);
393 omap_sham_write_mask(dd, SHA_REG_MASK(dd),
394 SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
395 SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
397 * Setting ALGO_CONST only for the first iteration
398 * and CLOSE_HASH only for the last one.
400 if ((ctx->flags & FLAGS_MODE_MASK) == FLAGS_MODE_SHA1)
401 val |= SHA_REG_CTRL_ALGO;
403 val |= SHA_REG_CTRL_ALGO_CONST;
405 val |= SHA_REG_CTRL_CLOSE_HASH;
407 mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
408 SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;
410 omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
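/*
 * OMAP2-style control register layout used above: the byte count goes into
 * bits [31:5], ALGO selects SHA1 vs MD5, ALGO_CONST loads the standard
 * initial digest constants for the first block, and CLOSE_HASH asks the
 * hardware to pad and finalize the last block.
 */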
413 static void omap_sham_trigger_omap2(struct omap_sham_dev *dd, size_t length)
417 static int omap_sham_poll_irq_omap2(struct omap_sham_dev *dd)
419 return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY);
422 static int get_block_size(struct omap_sham_reqctx *ctx)
426 switch (ctx->flags & FLAGS_MODE_MASK) {
428 case FLAGS_MODE_SHA1:
431 case FLAGS_MODE_SHA224:
432 case FLAGS_MODE_SHA256:
433 d = SHA256_BLOCK_SIZE;
435 case FLAGS_MODE_SHA384:
436 case FLAGS_MODE_SHA512:
437 d = SHA512_BLOCK_SIZE;
446 static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset,
447 u32 *value, int count)
449 for (; count--; value++, offset += 4)
450 omap_sham_write(dd, offset, *value);
453 static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
456 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
460 * Setting ALGO_CONST only for the first iteration and
461 * CLOSE_HASH only for the last one. Note that flags mode bits
462 * correspond to algorithm encoding in mode register.
464 val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT);
466 struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
467 struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
468 struct omap_sham_hmac_ctx *bctx = tctx->base;
471 val |= SHA_REG_MODE_ALGO_CONSTANT;
473 if (ctx->flags & BIT(FLAGS_HMAC)) {
474 bs = get_block_size(ctx);
475 nr_dr = bs / (2 * sizeof(u32));
476 val |= SHA_REG_MODE_HMAC_KEY_PROC;
477 omap_sham_write_n(dd, SHA_REG_ODIGEST(dd, 0),
478 (u32 *)bctx->ipad, nr_dr);
479 omap_sham_write_n(dd, SHA_REG_IDIGEST(dd, 0),
480 (u32 *)bctx->ipad + nr_dr, nr_dr);
486 val |= SHA_REG_MODE_CLOSE_HASH;
488 if (ctx->flags & BIT(FLAGS_HMAC))
489 val |= SHA_REG_MODE_HMAC_OUTER_HASH;
492 mask = SHA_REG_MODE_ALGO_CONSTANT | SHA_REG_MODE_CLOSE_HASH |
493 SHA_REG_MODE_ALGO_MASK | SHA_REG_MODE_HMAC_OUTER_HASH |
494 SHA_REG_MODE_HMAC_KEY_PROC;
496 dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags);
497 omap_sham_write_mask(dd, SHA_REG_MODE(dd), val, mask);
498 omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY);
499 omap_sham_write_mask(dd, SHA_REG_MASK(dd),
501 (dma ? SHA_REG_MASK_DMA_EN : 0),
502 SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
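/*
 * On this IP revision HMAC key handling is done largely in hardware: the
 * key material stored in bctx->ipad is loaded into the outer and inner
 * digest registers and HMAC_KEY_PROC is set, so the hardware derives and
 * hashes the ipad/opad blocks itself (see FLAGS_AUTO_XOR in setkey).
 */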
505 static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length)
507 omap_sham_write(dd, SHA_REG_LENGTH(dd), length);
510 static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd)
512 return omap_sham_wait(dd, SHA_REG_IRQSTATUS,
513 SHA_REG_IRQSTATUS_INPUT_RDY);
516 static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, size_t length,
519 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
520 int count, len32, bs32, offset = 0;
523 struct sg_mapping_iter mi;
525 dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
526 ctx->digcnt, length, final);
528 dd->pdata->write_ctrl(dd, length, final, 0);
529 dd->pdata->trigger(dd, length);
531 /* digcnt should be non-zero before the next lines so clocks can be disabled later */
532 ctx->digcnt += length;
533 ctx->total -= length;
536 set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
538 set_bit(FLAGS_CPU, &dd->flags);
540 len32 = DIV_ROUND_UP(length, sizeof(u32));
541 bs32 = get_block_size(ctx) / sizeof(u32);
543 sg_miter_start(&mi, ctx->sg, ctx->sg_len,
544 SG_MITER_FROM_SG | SG_MITER_ATOMIC);
549 if (dd->pdata->poll_irq(dd))
552 for (count = 0; count < min(len32, bs32); count++, offset++) {
557 pr_err("sg miter failure.\n");
563 omap_sham_write(dd, SHA_REG_DIN(dd, count),
567 len32 -= min(len32, bs32);
575 static void omap_sham_dma_callback(void *param)
577 struct omap_sham_dev *dd = param;
579 set_bit(FLAGS_DMA_READY, &dd->flags);
580 tasklet_schedule(&dd->done_task);
583 static int omap_sham_xmit_dma(struct omap_sham_dev *dd, size_t length,
586 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
587 struct dma_async_tx_descriptor *tx;
588 struct dma_slave_config cfg;
591 dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
592 ctx->digcnt, length, final);
594 if (!dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE)) {
595 dev_err(dd->dev, "dma_map_sg error\n");
599 memset(&cfg, 0, sizeof(cfg));
601 cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0);
602 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
603 cfg.dst_maxburst = get_block_size(ctx) / DMA_SLAVE_BUSWIDTH_4_BYTES;
605 ret = dmaengine_slave_config(dd->dma_lch, &cfg);
607 pr_err("omap-sham: can't configure dmaengine slave: %d\n", ret);
611 tx = dmaengine_prep_slave_sg(dd->dma_lch, ctx->sg, ctx->sg_len,
613 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
616 dev_err(dd->dev, "prep_slave_sg failed\n");
620 tx->callback = omap_sham_dma_callback;
621 tx->callback_param = dd;
623 dd->pdata->write_ctrl(dd, length, final, 1);
625 ctx->digcnt += length;
626 ctx->total -= length;
629 set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
631 set_bit(FLAGS_DMA_ACTIVE, &dd->flags);
633 dmaengine_submit(tx);
634 dma_async_issue_pending(dd->dma_lch);
636 dd->pdata->trigger(dd, length);
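/* DMA path summary (a sketch of the flow above): the scatterlist is mapped
 * for device reads, the slave channel is pointed at the DIN register with a
 * burst size of one hash block, the control register is programmed with DMA
 * enabled, and finally the length trigger starts the transfer. */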
641 static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx,
642 struct scatterlist *sg, int bs, int new_len)
644 int n = sg_nents(sg);
645 struct scatterlist *tmp;
646 int offset = ctx->offset;
648 ctx->total = new_len;
653 ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
657 sg_init_table(ctx->sg, n);
664 sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
667 new_len -= ctx->bufcnt;
670 while (sg && new_len) {
671 int len = sg->length - offset;
674 offset -= sg->length;
684 sg_set_page(tmp, sg_page(sg), len, sg->offset + offset);
699 set_bit(FLAGS_SGS_ALLOCED, &ctx->dd->flags);
701 ctx->offset += new_len - ctx->bufcnt;
707 static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx,
708 struct scatterlist *sg, int bs,
709 unsigned int new_len)
714 pages = get_order(new_len);
716 buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
718 pr_err("Couldn't allocate pages for unaligned cases.\n");
723 memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
725 scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->offset,
726 min(new_len, ctx->total) - ctx->bufcnt, 0);
727 sg_init_table(ctx->sgl, 1);
728 sg_set_buf(ctx->sgl, buf, new_len);
730 set_bit(FLAGS_SGS_COPIED, &ctx->dd->flags);
732 ctx->offset += new_len - ctx->bufcnt;
734 ctx->total = new_len;
739 static int omap_sham_align_sgs(struct scatterlist *sg,
740 int nbytes, int bs, bool final,
741 struct omap_sham_reqctx *rctx)
746 struct scatterlist *sg_tmp = sg;
748 int offset = rctx->offset;
749 int bufcnt = rctx->bufcnt;
751 if (!sg || !sg->length || !nbytes) {
753 bufcnt = DIV_ROUND_UP(bufcnt, bs) * bs;
754 sg_init_table(rctx->sgl, 1);
755 sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, bufcnt);
756 rctx->sg = rctx->sgl;
769 new_len = DIV_ROUND_UP(new_len, bs) * bs;
771 new_len = (new_len - 1) / bs * bs;
776 if (nbytes != new_len)
779 while (nbytes > 0 && sg_tmp) {
783 if (!IS_ALIGNED(bufcnt, bs)) {
795 #ifdef CONFIG_ZONE_DMA
796 if (page_zonenum(sg_page(sg_tmp)) != ZONE_DMA) {
802 if (offset < sg_tmp->length) {
803 if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) {
808 if (!IS_ALIGNED(sg_tmp->length - offset, bs)) {
815 offset -= sg_tmp->length;
821 nbytes -= sg_tmp->length;
824 sg_tmp = sg_next(sg_tmp);
832 if (new_len > OMAP_SHA_MAX_DMA_LEN) {
833 new_len = OMAP_SHA_MAX_DMA_LEN;
838 return omap_sham_copy_sgs(rctx, sg, bs, new_len);
840 return omap_sham_copy_sg_lists(rctx, sg, bs, new_len);
842 rctx->total = new_len;
843 rctx->offset += new_len;
846 sg_init_table(rctx->sgl, 2);
847 sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt);
848 sg_chain(rctx->sgl, 2, sg);
849 rctx->sg = rctx->sgl;
857 static int omap_sham_prepare_request(struct ahash_request *req, bool update)
859 struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
863 bool final = rctx->flags & BIT(FLAGS_FINUP);
866 bs = get_block_size(rctx);
868 nbytes = rctx->bufcnt;
871 nbytes += req->nbytes - rctx->offset;
873 dev_dbg(rctx->dd->dev,
874 "%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%d\n",
875 __func__, nbytes, bs, rctx->total, rctx->offset,
881 rctx->total = nbytes;
883 if (update && req->nbytes && (!IS_ALIGNED(rctx->bufcnt, bs))) {
884 int len = bs - rctx->bufcnt % bs;
886 if (len > req->nbytes)
888 scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, req->src,
895 memcpy(rctx->dd->xmit_buf, rctx->buffer, rctx->bufcnt);
897 ret = omap_sham_align_sgs(req->src, nbytes, bs, final, rctx);
901 hash_later = nbytes - rctx->total;
905 if (hash_later && hash_later <= rctx->buflen) {
906 scatterwalk_map_and_copy(rctx->buffer,
908 req->nbytes - hash_later,
911 rctx->bufcnt = hash_later;
916 if (hash_later > rctx->buflen)
917 set_bit(FLAGS_HUGE, &rctx->dd->flags);
919 rctx->total = min(nbytes, rctx->total);
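/*
 * Anything that does not fit the aligned, block-sized portion selected
 * above (the "hash_later" bytes) is copied back into ctx->buffer so it can
 * be prepended to the data of the next update() call.
 */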
924 static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
926 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
928 dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
930 clear_bit(FLAGS_DMA_ACTIVE, &dd->flags);
935 struct omap_sham_dev *omap_sham_find_dev(struct omap_sham_reqctx *ctx)
937 struct omap_sham_dev *dd;
942 spin_lock_bh(&sham.lock);
943 dd = list_first_entry(&sham.dev_list, struct omap_sham_dev, list);
944 list_move_tail(&dd->list, &sham.dev_list);
946 spin_unlock_bh(&sham.lock);
951 static int omap_sham_init(struct ahash_request *req)
953 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
954 struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
955 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
956 struct omap_sham_dev *dd;
961 dd = omap_sham_find_dev(ctx);
967 dev_dbg(dd->dev, "init: digest size: %d\n",
968 crypto_ahash_digestsize(tfm));
970 switch (crypto_ahash_digestsize(tfm)) {
971 case MD5_DIGEST_SIZE:
972 ctx->flags |= FLAGS_MODE_MD5;
973 bs = SHA1_BLOCK_SIZE;
975 case SHA1_DIGEST_SIZE:
976 ctx->flags |= FLAGS_MODE_SHA1;
977 bs = SHA1_BLOCK_SIZE;
979 case SHA224_DIGEST_SIZE:
980 ctx->flags |= FLAGS_MODE_SHA224;
981 bs = SHA224_BLOCK_SIZE;
983 case SHA256_DIGEST_SIZE:
984 ctx->flags |= FLAGS_MODE_SHA256;
985 bs = SHA256_BLOCK_SIZE;
987 case SHA384_DIGEST_SIZE:
988 ctx->flags |= FLAGS_MODE_SHA384;
989 bs = SHA384_BLOCK_SIZE;
991 case SHA512_DIGEST_SIZE:
992 ctx->flags |= FLAGS_MODE_SHA512;
993 bs = SHA512_BLOCK_SIZE;
1001 ctx->buflen = BUFLEN;
1003 if (tctx->flags & BIT(FLAGS_HMAC)) {
1004 if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
1005 struct omap_sham_hmac_ctx *bctx = tctx->base;
1007 memcpy(ctx->buffer, bctx->ipad, bs);
1011 ctx->flags |= BIT(FLAGS_HMAC);
1018 static int omap_sham_update_req(struct omap_sham_dev *dd)
1020 struct ahash_request *req = dd->req;
1021 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1023 bool final = (ctx->flags & BIT(FLAGS_FINUP)) &&
1024 !(dd->flags & BIT(FLAGS_HUGE));
1026 dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, final: %d",
1027 ctx->total, ctx->digcnt, final);
1029 if (ctx->total < get_block_size(ctx) ||
1030 ctx->total < dd->fallback_sz)
1031 ctx->flags |= BIT(FLAGS_CPU);
1033 if (ctx->flags & BIT(FLAGS_CPU))
1034 err = omap_sham_xmit_cpu(dd, ctx->total, final);
1036 err = omap_sham_xmit_dma(dd, ctx->total, final);
1038 /* wait for DMA completion before we can take more data */
1039 dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);
1044 static int omap_sham_final_req(struct omap_sham_dev *dd)
1046 struct ahash_request *req = dd->req;
1047 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1048 int err = 0, use_dma = 1;
1050 if (dd->flags & BIT(FLAGS_HUGE))
1053 if ((ctx->total <= get_block_size(ctx)) || dd->polling_mode)
1055 * faster to handle the last block with the CPU, or
1056 * use the CPU when DMA is not present.
1061 err = omap_sham_xmit_dma(dd, ctx->total, 1);
1063 err = omap_sham_xmit_cpu(dd, ctx->total, 1);
1067 dev_dbg(dd->dev, "final_req: err: %d\n", err);
1072 static int omap_sham_finish_hmac(struct ahash_request *req)
1074 struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
1075 struct omap_sham_hmac_ctx *bctx = tctx->base;
1076 int bs = crypto_shash_blocksize(bctx->shash);
1077 int ds = crypto_shash_digestsize(bctx->shash);
1078 SHASH_DESC_ON_STACK(shash, bctx->shash);
1080 shash->tfm = bctx->shash;
1082 return crypto_shash_init(shash) ?:
1083 crypto_shash_update(shash, bctx->opad, bs) ?:
1084 crypto_shash_finup(shash, req->result, ds, req->result);
1087 static int omap_sham_finish(struct ahash_request *req)
1089 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1090 struct omap_sham_dev *dd = ctx->dd;
1094 omap_sham_copy_ready_hash(req);
1095 if ((ctx->flags & BIT(FLAGS_HMAC)) &&
1096 !test_bit(FLAGS_AUTO_XOR, &dd->flags))
1097 err = omap_sham_finish_hmac(req);
1100 dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
1105 static void omap_sham_finish_req(struct ahash_request *req, int err)
1107 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1108 struct omap_sham_dev *dd = ctx->dd;
1110 if (test_bit(FLAGS_SGS_COPIED, &dd->flags))
1111 free_pages((unsigned long)sg_virt(ctx->sg),
1112 get_order(ctx->sg->length));
1114 if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags))
1119 dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED));
1121 if (dd->flags & BIT(FLAGS_HUGE)) {
1122 dd->flags &= ~(BIT(FLAGS_CPU) | BIT(FLAGS_DMA_READY) |
1123 BIT(FLAGS_OUTPUT_READY) | BIT(FLAGS_HUGE));
1124 omap_sham_prepare_request(req, ctx->op == OP_UPDATE);
1125 if (ctx->op == OP_UPDATE || (dd->flags & BIT(FLAGS_HUGE))) {
1126 err = omap_sham_update_req(dd);
1127 if (err != -EINPROGRESS &&
1128 (ctx->flags & BIT(FLAGS_FINUP)))
1129 err = omap_sham_final_req(dd);
1130 } else if (ctx->op == OP_FINAL) {
1131 omap_sham_final_req(dd);
1137 dd->pdata->copy_hash(req, 1);
1138 if (test_bit(FLAGS_FINAL, &dd->flags))
1139 err = omap_sham_finish(req);
1141 ctx->flags |= BIT(FLAGS_ERROR);
1144 /* atomic operation is not needed here */
1145 dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
1146 BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));
1148 pm_runtime_mark_last_busy(dd->dev);
1149 pm_runtime_put_autosuspend(dd->dev);
1153 if (req->base.complete)
1154 req->base.complete(&req->base, err);
1157 static int omap_sham_handle_queue(struct omap_sham_dev *dd,
1158 struct ahash_request *req)
1160 struct crypto_async_request *async_req, *backlog;
1161 struct omap_sham_reqctx *ctx;
1162 unsigned long flags;
1163 int err = 0, ret = 0;
1166 spin_lock_irqsave(&dd->lock, flags);
1168 ret = ahash_enqueue_request(&dd->queue, req);
1169 if (test_bit(FLAGS_BUSY, &dd->flags)) {
1170 spin_unlock_irqrestore(&dd->lock, flags);
1173 backlog = crypto_get_backlog(&dd->queue);
1174 async_req = crypto_dequeue_request(&dd->queue);
1176 set_bit(FLAGS_BUSY, &dd->flags);
1177 spin_unlock_irqrestore(&dd->lock, flags);
1183 backlog->complete(backlog, -EINPROGRESS);
1185 req = ahash_request_cast(async_req);
1187 ctx = ahash_request_ctx(req);
1189 err = omap_sham_prepare_request(req, ctx->op == OP_UPDATE);
1190 if (err || !ctx->total)
1193 dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
1194 ctx->op, req->nbytes);
1196 err = omap_sham_hw_init(dd);
1201 /* request has changed - restore hash */
1202 dd->pdata->copy_hash(req, 0);
1204 if (ctx->op == OP_UPDATE || (dd->flags & BIT(FLAGS_HUGE))) {
1205 err = omap_sham_update_req(dd);
1206 if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP)))
1207 /* no final() after finup() */
1208 err = omap_sham_final_req(dd);
1209 } else if (ctx->op == OP_FINAL) {
1210 err = omap_sham_final_req(dd);
1213 dev_dbg(dd->dev, "exit, err: %d\n", err);
1215 if (err != -EINPROGRESS) {
1216 /* done_task will not finish it, so do it here */
1217 omap_sham_finish_req(req, err);
1221 * Execute next request immediately if there is anything in the queue.
1230 static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
1232 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1233 struct omap_sham_dev *dd = ctx->dd;
1237 return omap_sham_handle_queue(dd, req);
1240 static int omap_sham_update(struct ahash_request *req)
1242 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1243 struct omap_sham_dev *dd = omap_sham_find_dev(ctx);
1248 if (ctx->bufcnt + req->nbytes <= ctx->buflen) {
1249 scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
1251 ctx->bufcnt += req->nbytes;
1255 if (dd->polling_mode)
1256 ctx->flags |= BIT(FLAGS_CPU);
1258 return omap_sham_enqueue(req, OP_UPDATE);
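/*
 * Note: updates that still fit in ctx->buffer are only copied and counted
 * above; no hardware request is queued until the buffer overflows or the
 * final()/finup() path forces the buffered data out.
 */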
1261 static int omap_sham_final_shash(struct ahash_request *req)
1263 struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
1264 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1268 * If we are running HMAC on hardware with limited support, skip
1269 * the ipad at the beginning of the buffer when falling back to
1270 * the software algorithm.
1272 if (test_bit(FLAGS_HMAC, &ctx->flags) &&
1273 !test_bit(FLAGS_AUTO_XOR, &ctx->dd->flags))
1274 offset = get_block_size(ctx);
1276 return crypto_shash_tfm_digest(tctx->fallback, ctx->buffer + offset,
1277 ctx->bufcnt - offset, req->result);
1280 static int omap_sham_final(struct ahash_request *req)
1282 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1284 ctx->flags |= BIT(FLAGS_FINUP);
1286 if (ctx->flags & BIT(FLAGS_ERROR))
1287 return 0; /* uncompleted hash is not needed */
1290 * OMAP HW accel works only with buffers >= 9.
1291 * HMAC is always >= 9 because ipad == block size.
1292 * If the buffer size is less than fallback_sz, we use the fallback
1293 * SW hashing, as using DMA + HW in this case doesn't provide any benefit.
1296 if (!ctx->digcnt && ctx->bufcnt < ctx->dd->fallback_sz)
1297 return omap_sham_final_shash(req);
1298 else if (ctx->bufcnt)
1299 return omap_sham_enqueue(req, OP_FINAL);
1301 /* copy ready hash (+ finalize hmac) */
1302 return omap_sham_finish(req);
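/*
 * Summary of the three cases above: tiny requests (no digest yet and less
 * than fallback_sz buffered) are finished with the software fallback,
 * buffered data is queued as an OP_FINAL hardware request, and an already
 * computed digest is just copied out (plus the outer HMAC step if needed).
 */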
1305 static int omap_sham_finup(struct ahash_request *req)
1307 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1310 ctx->flags |= BIT(FLAGS_FINUP);
1312 err1 = omap_sham_update(req);
1313 if (err1 == -EINPROGRESS || err1 == -EBUSY)
1316 * final() always has to be called to clean up resources,
1317 * even if update() failed, except for -EINPROGRESS
1319 err2 = omap_sham_final(req);
1321 return err1 ?: err2;
1324 static int omap_sham_digest(struct ahash_request *req)
1326 return omap_sham_init(req) ?: omap_sham_finup(req);
1329 static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
1330 unsigned int keylen)
1332 struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
1333 struct omap_sham_hmac_ctx *bctx = tctx->base;
1334 int bs = crypto_shash_blocksize(bctx->shash);
1335 int ds = crypto_shash_digestsize(bctx->shash);
1338 err = crypto_shash_setkey(tctx->fallback, key, keylen);
1343 err = crypto_shash_tfm_digest(bctx->shash, key, keylen,
1349 memcpy(bctx->ipad, key, keylen);
1352 memset(bctx->ipad + keylen, 0, bs - keylen);
1354 if (!test_bit(FLAGS_AUTO_XOR, &sham.flags)) {
1355 memcpy(bctx->opad, bctx->ipad, bs);
1357 for (i = 0; i < bs; i++) {
1358 bctx->ipad[i] ^= HMAC_IPAD_VALUE;
1359 bctx->opad[i] ^= HMAC_OPAD_VALUE;
1366 static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
1368 struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
1369 const char *alg_name = crypto_tfm_alg_name(tfm);
1371 /* Allocate a fallback and abort if it failed. */
1372 tctx->fallback = crypto_alloc_shash(alg_name, 0,
1373 CRYPTO_ALG_NEED_FALLBACK);
1374 if (IS_ERR(tctx->fallback)) {
1375 pr_err("omap-sham: fallback driver '%s' "
1376 "could not be loaded.\n", alg_name);
1377 return PTR_ERR(tctx->fallback);
1380 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1381 sizeof(struct omap_sham_reqctx) + BUFLEN);
1384 struct omap_sham_hmac_ctx *bctx = tctx->base;
1385 tctx->flags |= BIT(FLAGS_HMAC);
1386 bctx->shash = crypto_alloc_shash(alg_base, 0,
1387 CRYPTO_ALG_NEED_FALLBACK);
1388 if (IS_ERR(bctx->shash)) {
1389 pr_err("omap-sham: base driver '%s' "
1390 "could not be loaded.\n", alg_base);
1391 crypto_free_shash(tctx->fallback);
1392 return PTR_ERR(bctx->shash);
1400 static int omap_sham_cra_init(struct crypto_tfm *tfm)
1402 return omap_sham_cra_init_alg(tfm, NULL);
1405 static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
1407 return omap_sham_cra_init_alg(tfm, "sha1");
1410 static int omap_sham_cra_sha224_init(struct crypto_tfm *tfm)
1412 return omap_sham_cra_init_alg(tfm, "sha224");
1415 static int omap_sham_cra_sha256_init(struct crypto_tfm *tfm)
1417 return omap_sham_cra_init_alg(tfm, "sha256");
1420 static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
1422 return omap_sham_cra_init_alg(tfm, "md5");
1425 static int omap_sham_cra_sha384_init(struct crypto_tfm *tfm)
1427 return omap_sham_cra_init_alg(tfm, "sha384");
1430 static int omap_sham_cra_sha512_init(struct crypto_tfm *tfm)
1432 return omap_sham_cra_init_alg(tfm, "sha512");
1435 static void omap_sham_cra_exit(struct crypto_tfm *tfm)
1437 struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
1439 crypto_free_shash(tctx->fallback);
1440 tctx->fallback = NULL;
1442 if (tctx->flags & BIT(FLAGS_HMAC)) {
1443 struct omap_sham_hmac_ctx *bctx = tctx->base;
1444 crypto_free_shash(bctx->shash);
1448 static int omap_sham_export(struct ahash_request *req, void *out)
1450 struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
1452 memcpy(out, rctx, sizeof(*rctx) + rctx->bufcnt);
1457 static int omap_sham_import(struct ahash_request *req, const void *in)
1459 struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
1460 const struct omap_sham_reqctx *ctx_in = in;
1462 memcpy(rctx, in, sizeof(*rctx) + ctx_in->bufcnt);
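/*
 * export()/import() simply serialize the request context together with the
 * currently buffered bytes, which is why halg.statesize is set to the
 * context size plus the buffer length at registration time below.
 */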
1467 static struct ahash_alg algs_sha1_md5[] = {
1469 .init = omap_sham_init,
1470 .update = omap_sham_update,
1471 .final = omap_sham_final,
1472 .finup = omap_sham_finup,
1473 .digest = omap_sham_digest,
1474 .halg.digestsize = SHA1_DIGEST_SIZE,
1477 .cra_driver_name = "omap-sha1",
1478 .cra_priority = 400,
1479 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1481 CRYPTO_ALG_NEED_FALLBACK,
1482 .cra_blocksize = SHA1_BLOCK_SIZE,
1483 .cra_ctxsize = sizeof(struct omap_sham_ctx),
1484 .cra_alignmask = OMAP_ALIGN_MASK,
1485 .cra_module = THIS_MODULE,
1486 .cra_init = omap_sham_cra_init,
1487 .cra_exit = omap_sham_cra_exit,
1491 .init = omap_sham_init,
1492 .update = omap_sham_update,
1493 .final = omap_sham_final,
1494 .finup = omap_sham_finup,
1495 .digest = omap_sham_digest,
1496 .halg.digestsize = MD5_DIGEST_SIZE,
1499 .cra_driver_name = "omap-md5",
1500 .cra_priority = 400,
1501 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1503 CRYPTO_ALG_NEED_FALLBACK,
1504 .cra_blocksize = SHA1_BLOCK_SIZE,
1505 .cra_ctxsize = sizeof(struct omap_sham_ctx),
1506 .cra_alignmask = OMAP_ALIGN_MASK,
1507 .cra_module = THIS_MODULE,
1508 .cra_init = omap_sham_cra_init,
1509 .cra_exit = omap_sham_cra_exit,
1513 .init = omap_sham_init,
1514 .update = omap_sham_update,
1515 .final = omap_sham_final,
1516 .finup = omap_sham_finup,
1517 .digest = omap_sham_digest,
1518 .setkey = omap_sham_setkey,
1519 .halg.digestsize = SHA1_DIGEST_SIZE,
1521 .cra_name = "hmac(sha1)",
1522 .cra_driver_name = "omap-hmac-sha1",
1523 .cra_priority = 400,
1524 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1526 CRYPTO_ALG_NEED_FALLBACK,
1527 .cra_blocksize = SHA1_BLOCK_SIZE,
1528 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
1529 sizeof(struct omap_sham_hmac_ctx),
1530 .cra_alignmask = OMAP_ALIGN_MASK,
1531 .cra_module = THIS_MODULE,
1532 .cra_init = omap_sham_cra_sha1_init,
1533 .cra_exit = omap_sham_cra_exit,
1537 .init = omap_sham_init,
1538 .update = omap_sham_update,
1539 .final = omap_sham_final,
1540 .finup = omap_sham_finup,
1541 .digest = omap_sham_digest,
1542 .setkey = omap_sham_setkey,
1543 .halg.digestsize = MD5_DIGEST_SIZE,
1545 .cra_name = "hmac(md5)",
1546 .cra_driver_name = "omap-hmac-md5",
1547 .cra_priority = 400,
1548 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1550 CRYPTO_ALG_NEED_FALLBACK,
1551 .cra_blocksize = SHA1_BLOCK_SIZE,
1552 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
1553 sizeof(struct omap_sham_hmac_ctx),
1554 .cra_alignmask = OMAP_ALIGN_MASK,
1555 .cra_module = THIS_MODULE,
1556 .cra_init = omap_sham_cra_md5_init,
1557 .cra_exit = omap_sham_cra_exit,
1562 /* OMAP4 has some algs in addition to what OMAP2 has */
1563 static struct ahash_alg algs_sha224_sha256[] = {
1565 .init = omap_sham_init,
1566 .update = omap_sham_update,
1567 .final = omap_sham_final,
1568 .finup = omap_sham_finup,
1569 .digest = omap_sham_digest,
1570 .halg.digestsize = SHA224_DIGEST_SIZE,
1572 .cra_name = "sha224",
1573 .cra_driver_name = "omap-sha224",
1574 .cra_priority = 400,
1575 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1577 CRYPTO_ALG_NEED_FALLBACK,
1578 .cra_blocksize = SHA224_BLOCK_SIZE,
1579 .cra_ctxsize = sizeof(struct omap_sham_ctx),
1580 .cra_alignmask = OMAP_ALIGN_MASK,
1581 .cra_module = THIS_MODULE,
1582 .cra_init = omap_sham_cra_init,
1583 .cra_exit = omap_sham_cra_exit,
1587 .init = omap_sham_init,
1588 .update = omap_sham_update,
1589 .final = omap_sham_final,
1590 .finup = omap_sham_finup,
1591 .digest = omap_sham_digest,
1592 .halg.digestsize = SHA256_DIGEST_SIZE,
1594 .cra_name = "sha256",
1595 .cra_driver_name = "omap-sha256",
1596 .cra_priority = 400,
1597 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1599 CRYPTO_ALG_NEED_FALLBACK,
1600 .cra_blocksize = SHA256_BLOCK_SIZE,
1601 .cra_ctxsize = sizeof(struct omap_sham_ctx),
1602 .cra_alignmask = OMAP_ALIGN_MASK,
1603 .cra_module = THIS_MODULE,
1604 .cra_init = omap_sham_cra_init,
1605 .cra_exit = omap_sham_cra_exit,
1609 .init = omap_sham_init,
1610 .update = omap_sham_update,
1611 .final = omap_sham_final,
1612 .finup = omap_sham_finup,
1613 .digest = omap_sham_digest,
1614 .setkey = omap_sham_setkey,
1615 .halg.digestsize = SHA224_DIGEST_SIZE,
1617 .cra_name = "hmac(sha224)",
1618 .cra_driver_name = "omap-hmac-sha224",
1619 .cra_priority = 400,
1620 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1622 CRYPTO_ALG_NEED_FALLBACK,
1623 .cra_blocksize = SHA224_BLOCK_SIZE,
1624 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
1625 sizeof(struct omap_sham_hmac_ctx),
1626 .cra_alignmask = OMAP_ALIGN_MASK,
1627 .cra_module = THIS_MODULE,
1628 .cra_init = omap_sham_cra_sha224_init,
1629 .cra_exit = omap_sham_cra_exit,
1633 .init = omap_sham_init,
1634 .update = omap_sham_update,
1635 .final = omap_sham_final,
1636 .finup = omap_sham_finup,
1637 .digest = omap_sham_digest,
1638 .setkey = omap_sham_setkey,
1639 .halg.digestsize = SHA256_DIGEST_SIZE,
1641 .cra_name = "hmac(sha256)",
1642 .cra_driver_name = "omap-hmac-sha256",
1643 .cra_priority = 400,
1644 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1646 CRYPTO_ALG_NEED_FALLBACK,
1647 .cra_blocksize = SHA256_BLOCK_SIZE,
1648 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
1649 sizeof(struct omap_sham_hmac_ctx),
1650 .cra_alignmask = OMAP_ALIGN_MASK,
1651 .cra_module = THIS_MODULE,
1652 .cra_init = omap_sham_cra_sha256_init,
1653 .cra_exit = omap_sham_cra_exit,
1658 static struct ahash_alg algs_sha384_sha512[] = {
1660 .init = omap_sham_init,
1661 .update = omap_sham_update,
1662 .final = omap_sham_final,
1663 .finup = omap_sham_finup,
1664 .digest = omap_sham_digest,
1665 .halg.digestsize = SHA384_DIGEST_SIZE,
1667 .cra_name = "sha384",
1668 .cra_driver_name = "omap-sha384",
1669 .cra_priority = 400,
1670 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1672 CRYPTO_ALG_NEED_FALLBACK,
1673 .cra_blocksize = SHA384_BLOCK_SIZE,
1674 .cra_ctxsize = sizeof(struct omap_sham_ctx),
1675 .cra_alignmask = OMAP_ALIGN_MASK,
1676 .cra_module = THIS_MODULE,
1677 .cra_init = omap_sham_cra_init,
1678 .cra_exit = omap_sham_cra_exit,
1682 .init = omap_sham_init,
1683 .update = omap_sham_update,
1684 .final = omap_sham_final,
1685 .finup = omap_sham_finup,
1686 .digest = omap_sham_digest,
1687 .halg.digestsize = SHA512_DIGEST_SIZE,
1689 .cra_name = "sha512",
1690 .cra_driver_name = "omap-sha512",
1691 .cra_priority = 400,
1692 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1694 CRYPTO_ALG_NEED_FALLBACK,
1695 .cra_blocksize = SHA512_BLOCK_SIZE,
1696 .cra_ctxsize = sizeof(struct omap_sham_ctx),
1697 .cra_alignmask = OMAP_ALIGN_MASK,
1698 .cra_module = THIS_MODULE,
1699 .cra_init = omap_sham_cra_init,
1700 .cra_exit = omap_sham_cra_exit,
1704 .init = omap_sham_init,
1705 .update = omap_sham_update,
1706 .final = omap_sham_final,
1707 .finup = omap_sham_finup,
1708 .digest = omap_sham_digest,
1709 .setkey = omap_sham_setkey,
1710 .halg.digestsize = SHA384_DIGEST_SIZE,
1712 .cra_name = "hmac(sha384)",
1713 .cra_driver_name = "omap-hmac-sha384",
1714 .cra_priority = 400,
1715 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1717 CRYPTO_ALG_NEED_FALLBACK,
1718 .cra_blocksize = SHA384_BLOCK_SIZE,
1719 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
1720 sizeof(struct omap_sham_hmac_ctx),
1721 .cra_alignmask = OMAP_ALIGN_MASK,
1722 .cra_module = THIS_MODULE,
1723 .cra_init = omap_sham_cra_sha384_init,
1724 .cra_exit = omap_sham_cra_exit,
1728 .init = omap_sham_init,
1729 .update = omap_sham_update,
1730 .final = omap_sham_final,
1731 .finup = omap_sham_finup,
1732 .digest = omap_sham_digest,
1733 .setkey = omap_sham_setkey,
1734 .halg.digestsize = SHA512_DIGEST_SIZE,
1736 .cra_name = "hmac(sha512)",
1737 .cra_driver_name = "omap-hmac-sha512",
1738 .cra_priority = 400,
1739 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1741 CRYPTO_ALG_NEED_FALLBACK,
1742 .cra_blocksize = SHA512_BLOCK_SIZE,
1743 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
1744 sizeof(struct omap_sham_hmac_ctx),
1745 .cra_alignmask = OMAP_ALIGN_MASK,
1746 .cra_module = THIS_MODULE,
1747 .cra_init = omap_sham_cra_sha512_init,
1748 .cra_exit = omap_sham_cra_exit,
1753 static void omap_sham_done_task(unsigned long data)
1755 struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
1758 dev_dbg(dd->dev, "%s: flags=%lx\n", __func__, dd->flags);
1760 if (!test_bit(FLAGS_BUSY, &dd->flags)) {
1761 omap_sham_handle_queue(dd, NULL);
1765 if (test_bit(FLAGS_CPU, &dd->flags)) {
1766 if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
1768 } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
1769 if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
1770 omap_sham_update_dma_stop(dd);
1776 if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
1777 /* hash or semi-hash ready */
1778 clear_bit(FLAGS_DMA_READY, &dd->flags);
1786 dev_dbg(dd->dev, "update done: err: %d\n", err);
1787 /* finish current request */
1788 omap_sham_finish_req(dd->req, err);
1790 /* If we are not busy, process next req */
1791 if (!test_bit(FLAGS_BUSY, &dd->flags))
1792 omap_sham_handle_queue(dd, NULL);
1795 static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd)
1797 if (!test_bit(FLAGS_BUSY, &dd->flags)) {
1798 dev_warn(dd->dev, "Interrupt when no active requests.\n");
1800 set_bit(FLAGS_OUTPUT_READY, &dd->flags);
1801 tasklet_schedule(&dd->done_task);
1807 static irqreturn_t omap_sham_irq_omap2(int irq, void *dev_id)
1809 struct omap_sham_dev *dd = dev_id;
1811 if (unlikely(test_bit(FLAGS_FINAL, &dd->flags)))
1812 /* final -> allow device to go to power-saving mode */
1813 omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);
1815 omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
1816 SHA_REG_CTRL_OUTPUT_READY);
1817 omap_sham_read(dd, SHA_REG_CTRL);
1819 return omap_sham_irq_common(dd);
1822 static irqreturn_t omap_sham_irq_omap4(int irq, void *dev_id)
1824 struct omap_sham_dev *dd = dev_id;
1826 omap_sham_write_mask(dd, SHA_REG_MASK(dd), 0, SHA_REG_MASK_IT_EN);
1828 return omap_sham_irq_common(dd);
1831 static struct omap_sham_algs_info omap_sham_algs_info_omap2[] = {
1833 .algs_list = algs_sha1_md5,
1834 .size = ARRAY_SIZE(algs_sha1_md5),
1838 static const struct omap_sham_pdata omap_sham_pdata_omap2 = {
1839 .algs_info = omap_sham_algs_info_omap2,
1840 .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap2),
1841 .flags = BIT(FLAGS_BE32_SHA1),
1842 .digest_size = SHA1_DIGEST_SIZE,
1843 .copy_hash = omap_sham_copy_hash_omap2,
1844 .write_ctrl = omap_sham_write_ctrl_omap2,
1845 .trigger = omap_sham_trigger_omap2,
1846 .poll_irq = omap_sham_poll_irq_omap2,
1847 .intr_hdlr = omap_sham_irq_omap2,
1848 .idigest_ofs = 0x00,
1853 .sysstatus_ofs = 0x64,
1861 static struct omap_sham_algs_info omap_sham_algs_info_omap4[] = {
1863 .algs_list = algs_sha1_md5,
1864 .size = ARRAY_SIZE(algs_sha1_md5),
1867 .algs_list = algs_sha224_sha256,
1868 .size = ARRAY_SIZE(algs_sha224_sha256),
1872 static const struct omap_sham_pdata omap_sham_pdata_omap4 = {
1873 .algs_info = omap_sham_algs_info_omap4,
1874 .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap4),
1875 .flags = BIT(FLAGS_AUTO_XOR),
1876 .digest_size = SHA256_DIGEST_SIZE,
1877 .copy_hash = omap_sham_copy_hash_omap4,
1878 .write_ctrl = omap_sham_write_ctrl_omap4,
1879 .trigger = omap_sham_trigger_omap4,
1880 .poll_irq = omap_sham_poll_irq_omap4,
1881 .intr_hdlr = omap_sham_irq_omap4,
1882 .idigest_ofs = 0x020,
1885 .digcnt_ofs = 0x040,
1888 .sysstatus_ofs = 0x114,
1891 .major_mask = 0x0700,
1893 .minor_mask = 0x003f,
1897 static struct omap_sham_algs_info omap_sham_algs_info_omap5[] = {
1899 .algs_list = algs_sha1_md5,
1900 .size = ARRAY_SIZE(algs_sha1_md5),
1903 .algs_list = algs_sha224_sha256,
1904 .size = ARRAY_SIZE(algs_sha224_sha256),
1907 .algs_list = algs_sha384_sha512,
1908 .size = ARRAY_SIZE(algs_sha384_sha512),
1912 static const struct omap_sham_pdata omap_sham_pdata_omap5 = {
1913 .algs_info = omap_sham_algs_info_omap5,
1914 .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap5),
1915 .flags = BIT(FLAGS_AUTO_XOR),
1916 .digest_size = SHA512_DIGEST_SIZE,
1917 .copy_hash = omap_sham_copy_hash_omap4,
1918 .write_ctrl = omap_sham_write_ctrl_omap4,
1919 .trigger = omap_sham_trigger_omap4,
1920 .poll_irq = omap_sham_poll_irq_omap4,
1921 .intr_hdlr = omap_sham_irq_omap4,
1922 .idigest_ofs = 0x240,
1923 .odigest_ofs = 0x200,
1925 .digcnt_ofs = 0x280,
1928 .sysstatus_ofs = 0x114,
1930 .length_ofs = 0x288,
1931 .major_mask = 0x0700,
1933 .minor_mask = 0x003f,
1937 static const struct of_device_id omap_sham_of_match[] = {
1939 .compatible = "ti,omap2-sham",
1940 .data = &omap_sham_pdata_omap2,
1943 .compatible = "ti,omap3-sham",
1944 .data = &omap_sham_pdata_omap2,
1947 .compatible = "ti,omap4-sham",
1948 .data = &omap_sham_pdata_omap4,
1951 .compatible = "ti,omap5-sham",
1952 .data = &omap_sham_pdata_omap5,
1956 MODULE_DEVICE_TABLE(of, omap_sham_of_match);
1958 static int omap_sham_get_res_of(struct omap_sham_dev *dd,
1959 struct device *dev, struct resource *res)
1961 struct device_node *node = dev->of_node;
1964 dd->pdata = of_device_get_match_data(dev);
1966 dev_err(dev, "no compatible OF match\n");
1971 err = of_address_to_resource(node, 0, res);
1973 dev_err(dev, "can't translate OF node address\n");
1978 dd->irq = irq_of_parse_and_map(node, 0);
1980 dev_err(dev, "can't translate OF irq value\n");
1989 static const struct of_device_id omap_sham_of_match[] = {
1993 static int omap_sham_get_res_of(struct omap_sham_dev *dd,
1994 struct device *dev, struct resource *res)
2000 static int omap_sham_get_res_pdev(struct omap_sham_dev *dd,
2001 struct platform_device *pdev, struct resource *res)
2003 struct device *dev = &pdev->dev;
2007 /* Get the base address */
2008 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2010 dev_err(dev, "no MEM resource info\n");
2014 memcpy(res, r, sizeof(*res));
2017 dd->irq = platform_get_irq(pdev, 0);
2023 /* Only OMAP2/3 can be non-DT */
2024 dd->pdata = &omap_sham_pdata_omap2;
2030 static ssize_t fallback_show(struct device *dev, struct device_attribute *attr,
2033 struct omap_sham_dev *dd = dev_get_drvdata(dev);
2035 return sprintf(buf, "%d\n", dd->fallback_sz);
2038 static ssize_t fallback_store(struct device *dev, struct device_attribute *attr,
2039 const char *buf, size_t size)
2041 struct omap_sham_dev *dd = dev_get_drvdata(dev);
2045 status = kstrtol(buf, 0, &value);
2049 /* HW accelerator only works with buffers >= 9 */
2051 dev_err(dev, "minimum fallback size 9\n");
2055 dd->fallback_sz = value;
2060 static ssize_t queue_len_show(struct device *dev, struct device_attribute *attr,
2063 struct omap_sham_dev *dd = dev_get_drvdata(dev);
2065 return sprintf(buf, "%d\n", dd->queue.max_qlen);
2068 static ssize_t queue_len_store(struct device *dev,
2069 struct device_attribute *attr, const char *buf,
2072 struct omap_sham_dev *dd = dev_get_drvdata(dev);
2075 unsigned long flags;
2077 status = kstrtol(buf, 0, &value);
2085 * Changing the queue size on the fly is safe. If the new size is smaller
2086 * than the current size, the queue will simply not accept new entries
2087 * until it has shrunk enough.
2089 spin_lock_irqsave(&dd->lock, flags);
2090 dd->queue.max_qlen = value;
2091 spin_unlock_irqrestore(&dd->lock, flags);
2096 static DEVICE_ATTR_RW(queue_len);
2097 static DEVICE_ATTR_RW(fallback);
2099 static struct attribute *omap_sham_attrs[] = {
2100 &dev_attr_queue_len.attr,
2101 &dev_attr_fallback.attr,
2105 static struct attribute_group omap_sham_attr_group = {
2106 .attrs = omap_sham_attrs,
2109 static int omap_sham_probe(struct platform_device *pdev)
2111 struct omap_sham_dev *dd;
2112 struct device *dev = &pdev->dev;
2113 struct resource res;
2114 dma_cap_mask_t mask;
2118 dd = devm_kzalloc(dev, sizeof(struct omap_sham_dev), GFP_KERNEL);
2120 dev_err(dev, "unable to alloc data struct.\n");
2125 platform_set_drvdata(pdev, dd);
2127 INIT_LIST_HEAD(&dd->list);
2128 spin_lock_init(&dd->lock);
2129 tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
2130 crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);
2132 err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) :
2133 omap_sham_get_res_pdev(dd, pdev, &res);
2137 dd->io_base = devm_ioremap_resource(dev, &res);
2138 if (IS_ERR(dd->io_base)) {
2139 err = PTR_ERR(dd->io_base);
2142 dd->phys_base = res.start;
2144 err = devm_request_irq(dev, dd->irq, dd->pdata->intr_hdlr,
2145 IRQF_TRIGGER_NONE, dev_name(dev), dd);
2147 dev_err(dev, "unable to request irq %d, err = %d\n",
2153 dma_cap_set(DMA_SLAVE, mask);
2155 dd->dma_lch = dma_request_chan(dev, "rx");
2156 if (IS_ERR(dd->dma_lch)) {
2157 err = PTR_ERR(dd->dma_lch);
2158 if (err == -EPROBE_DEFER)
2161 dd->polling_mode = 1;
2162 dev_dbg(dev, "using polling mode instead of dma\n");
2165 dd->flags |= dd->pdata->flags;
2166 sham.flags |= dd->pdata->flags;
2168 pm_runtime_use_autosuspend(dev);
2169 pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
2171 dd->fallback_sz = OMAP_SHA_DMA_THRESHOLD;
2173 pm_runtime_enable(dev);
2174 pm_runtime_irq_safe(dev);
2176 err = pm_runtime_get_sync(dev);
2178 dev_err(dev, "failed to get sync: %d\n", err);
2182 rev = omap_sham_read(dd, SHA_REG_REV(dd));
2183 pm_runtime_put_sync(&pdev->dev);
2185 dev_info(dev, "hw accel on OMAP rev %u.%u\n",
2186 (rev & dd->pdata->major_mask) >> dd->pdata->major_shift,
2187 (rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
2189 spin_lock(&sham.lock);
2190 list_add_tail(&dd->list, &sham.dev_list);
2191 spin_unlock(&sham.lock);
2193 for (i = 0; i < dd->pdata->algs_info_size; i++) {
2194 if (dd->pdata->algs_info[i].registered)
2197 for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
2198 struct ahash_alg *alg;
2200 alg = &dd->pdata->algs_info[i].algs_list[j];
2201 alg->export = omap_sham_export;
2202 alg->import = omap_sham_import;
2203 alg->halg.statesize = sizeof(struct omap_sham_reqctx) + BUFLEN;
2205 err = crypto_register_ahash(alg);
2209 dd->pdata->algs_info[i].registered++;
2213 err = sysfs_create_group(&dev->kobj, &omap_sham_attr_group);
2215 dev_err(dev, "could not create sysfs device attrs\n");
2222 for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
2223 for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
2224 crypto_unregister_ahash(
2225 &dd->pdata->algs_info[i].algs_list[j]);
2227 pm_runtime_disable(dev);
2228 if (!dd->polling_mode)
2229 dma_release_channel(dd->dma_lch);
2231 dev_err(dev, "initialization failed.\n");
2236 static int omap_sham_remove(struct platform_device *pdev)
2238 struct omap_sham_dev *dd;
2241 dd = platform_get_drvdata(pdev);
2244 spin_lock(&sham.lock);
2245 list_del(&dd->list);
2246 spin_unlock(&sham.lock);
2247 for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
2248 for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
2249 crypto_unregister_ahash(
2250 &dd->pdata->algs_info[i].algs_list[j]);
2251 dd->pdata->algs_info[i].registered--;
2253 tasklet_kill(&dd->done_task);
2254 pm_runtime_disable(&pdev->dev);
2256 if (!dd->polling_mode)
2257 dma_release_channel(dd->dma_lch);
2259 sysfs_remove_group(&dd->dev->kobj, &omap_sham_attr_group);
2264 #ifdef CONFIG_PM_SLEEP
2265 static int omap_sham_suspend(struct device *dev)
2267 pm_runtime_put_sync(dev);
2271 static int omap_sham_resume(struct device *dev)
2273 int err = pm_runtime_get_sync(dev);
2275 dev_err(dev, "failed to get sync: %d\n", err);
2282 static SIMPLE_DEV_PM_OPS(omap_sham_pm_ops, omap_sham_suspend, omap_sham_resume);
2284 static struct platform_driver omap_sham_driver = {
2285 .probe = omap_sham_probe,
2286 .remove = omap_sham_remove,
2288 .name = "omap-sham",
2289 .pm = &omap_sham_pm_ops,
2290 .of_match_table = omap_sham_of_match,
2294 module_platform_driver(omap_sham_driver);
2296 MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
2297 MODULE_LICENSE("GPL v2");
2298 MODULE_AUTHOR("Dmitry Kasatkin");
2299 MODULE_ALIAS("platform:omap-sham");