Merge tag 'exfat-for-5.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/linki...
[linux-2.6-microblaze.git] / drivers / crypto / stm32 / stm32-hash.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * This file is part of STM32 Crypto driver for Linux.
4  *
5  * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
6  * Author(s): Lionel DEBIEVE <lionel.debieve@st.com> for STMicroelectronics.
7  */
8
9 #include <linux/clk.h>
10 #include <linux/crypto.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/dmaengine.h>
14 #include <linux/interrupt.h>
15 #include <linux/io.h>
16 #include <linux/iopoll.h>
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/of_device.h>
20 #include <linux/platform_device.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/reset.h>
23
24 #include <crypto/engine.h>
25 #include <crypto/hash.h>
26 #include <crypto/md5.h>
27 #include <crypto/scatterwalk.h>
28 #include <crypto/sha1.h>
29 #include <crypto/sha2.h>
30 #include <crypto/internal/hash.h>
31
/* STM32 HASH peripheral register map (byte offsets from io_base) */
#define HASH_CR                         0x00
#define HASH_DIN                        0x04
#define HASH_STR                        0x08
#define HASH_IMR                        0x20
#define HASH_SR                         0x24
#define HASH_CSR(x)                     (0x0F8 + ((x) * 0x04))
#define HASH_HREG(x)                    (0x310 + ((x) * 0x04))
#define HASH_HWCFGR                     0x3F0
#define HASH_VER                        0x3F4
#define HASH_ID                         0x3F8

/* Control Register */
#define HASH_CR_INIT                    BIT(2)
#define HASH_CR_DMAE                    BIT(3)
#define HASH_CR_DATATYPE_POS            4
#define HASH_CR_MODE                    BIT(6)
#define HASH_CR_MDMAT                   BIT(13)
#define HASH_CR_DMAA                    BIT(14)
#define HASH_CR_LKEY                    BIT(16)

/* HASH_CR algorithm selection values (combinations of bits 7 and 18) */
#define HASH_CR_ALGO_SHA1               0x0
#define HASH_CR_ALGO_MD5                0x80
#define HASH_CR_ALGO_SHA224             0x40000
#define HASH_CR_ALGO_SHA256             0x40080

/* Interrupt enable bits (written to HASH_IMR) */
#define HASH_DINIE                      BIT(0)
#define HASH_DCIE                       BIT(1)

/* Interrupt Mask */
#define HASH_MASK_CALC_COMPLETION       BIT(0)
#define HASH_MASK_DATA_INPUT            BIT(1)

/* Context swap register */
#define HASH_CSR_REGISTER_NUMBER        53

/* Status Flags (HASH_SR) */
#define HASH_SR_DATA_INPUT_READY        BIT(0)
#define HASH_SR_OUTPUT_READY            BIT(1)
#define HASH_SR_DMA_ACTIVE              BIT(2)
#define HASH_SR_BUSY                    BIT(3)

/* STR Register: NBLW = number of valid bits in the last word, DCAL = start digest */
#define HASH_STR_NBLW_MASK              GENMASK(4, 0)
#define HASH_STR_DCAL                   BIT(8)

/* Driver state flags, shared between hdev->flags and rctx->flags */
#define HASH_FLAGS_INIT                 BIT(0)
#define HASH_FLAGS_OUTPUT_READY         BIT(1)
#define HASH_FLAGS_CPU                  BIT(2)
#define HASH_FLAGS_DMA_READY            BIT(3)
#define HASH_FLAGS_DMA_ACTIVE           BIT(4)
#define HASH_FLAGS_HMAC_INIT            BIT(5)
#define HASH_FLAGS_HMAC_FINAL           BIT(6)
#define HASH_FLAGS_HMAC_KEY             BIT(7)

#define HASH_FLAGS_FINAL                BIT(15)
#define HASH_FLAGS_FINUP                BIT(16)
/* One bit per algorithm inside HASH_FLAGS_ALGO_MASK */
#define HASH_FLAGS_ALGO_MASK            GENMASK(21, 18)
#define HASH_FLAGS_MD5                  BIT(18)
#define HASH_FLAGS_SHA1                 BIT(19)
#define HASH_FLAGS_SHA224               BIT(20)
#define HASH_FLAGS_SHA256               BIT(21)
#define HASH_FLAGS_ERRORS               BIT(22)
#define HASH_FLAGS_HMAC                 BIT(23)

/* rctx->op values dispatched by stm32_hash_one_request() */
#define HASH_OP_UPDATE                  1
#define HASH_OP_FINAL                   2

/* Values programmed into the HASH_CR DATATYPE field */
enum stm32_hash_data_format {
	HASH_DATA_32_BITS               = 0x0,
	HASH_DATA_16_BITS               = 0x1,
	HASH_DATA_8_BITS                = 0x2,
	HASH_DATA_1_BIT                 = 0x3
};

#define HASH_BUFLEN                     256	/* CPU-path staging buffer size */
#define HASH_LONG_KEY                   64	/* HMAC keys longer than this set HASH_CR_LKEY */
#define HASH_MAX_KEY_SIZE               (SHA256_BLOCK_SIZE * 8)
#define HASH_QUEUE_LENGTH               16
#define HASH_DMA_THRESHOLD              50	/* below this many bytes, DMA is not used */

#define HASH_AUTOSUSPEND_DELAY          50
/* Per-transform (tfm) context. */
struct stm32_hash_ctx {
	struct crypto_engine_ctx enginectx;	/* crypto engine callbacks */
	struct stm32_hash_dev	*hdev;		/* device bound to this tfm */
	unsigned long		flags;		/* HASH_FLAGS_* (e.g. HMAC) */

	u8			key[HASH_MAX_KEY_SIZE];	/* HMAC key copy */
	int			keylen;			/* HMAC key length in bytes */
};
123
/* Per-request context, embedded in each ahash request. */
struct stm32_hash_request_ctx {
	struct stm32_hash_dev	*hdev;	/* device processing this request */
	unsigned long		flags;	/* HASH_FLAGS_* for this request */
	unsigned long		op;	/* HASH_OP_UPDATE or HASH_OP_FINAL */

	/* Final digest, read back from HASH_HREG(x) */
	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
	size_t			digcnt;	/* digest size of the selected algo */
	size_t			bufcnt;	/* bytes currently staged in buffer[] */
	size_t			buflen;	/* staging capacity (HASH_BUFLEN) */

	/* DMA */
	struct scatterlist	*sg;	/* current source sg entry */
	unsigned int		offset;	/* byte offset inside current entry */
	unsigned int		total;	/* bytes still to consume */
	struct scatterlist	sg_key;	/* single-entry sg wrapping the HMAC key */

	dma_addr_t		dma_addr;
	size_t			dma_ct;	/* dma_map_sg() result */
	int			nents;	/* entries in the source sg list */

	u8			data_type;	/* enum stm32_hash_data_format */

	/* CPU-path staging area; also holds the DMA tail in dma_mode == 1 */
	u8 buffer[HASH_BUFLEN] __aligned(sizeof(u32));

	/* Export Context */
	u32			*hw_context;	/* saved HASH_CSR registers */
};
151
/* One group of ahash algorithm descriptors to register. */
struct stm32_hash_algs_info {
	struct ahash_alg	*algs_list;	/* array of algorithms */
	size_t			size;		/* number of entries in algs_list */
};
156
/* Compatible-specific data: which algorithms a given IP revision supports. */
struct stm32_hash_pdata {
	struct stm32_hash_algs_info	*algs_info;	/* algorithm groups */
	size_t				algs_info_size;	/* number of groups */
};
161
/* Per-device (HASH IP instance) state. */
struct stm32_hash_dev {
	struct list_head	list;		/* node in stm32_hash.dev_list */
	struct device		*dev;
	struct clk		*clk;
	struct reset_control	*rst;
	void __iomem		*io_base;	/* mapped register base */
	phys_addr_t		phys_base;	/* physical base, for DMA dst_addr */
	u32			dma_mode;	/* DT-provided DMA mode (1 = no MDMA chaining) */
	u32			dma_maxburst;

	struct ahash_request	*req;		/* request currently being processed */
	struct crypto_engine	*engine;

	int			err;
	unsigned long		flags;		/* device-side HASH_FLAGS_* */

	struct dma_chan		*dma_lch;	/* "in" channel feeding HASH_DIN */
	struct completion	dma_completion;	/* signalled by DMA callback */

	const struct stm32_hash_pdata	*pdata;
};
183
/* Driver-global registry of probed HASH devices. */
struct stm32_hash_drv {
	struct list_head	dev_list;
	spinlock_t		lock; /* List protection access */
};

/* Single instance; devices add themselves to dev_list at probe time. */
static struct stm32_hash_drv stm32_hash = {
	.dev_list = LIST_HEAD_INIT(stm32_hash.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(stm32_hash.lock),
};
193
194 static void stm32_hash_dma_callback(void *param);
195
196 static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset)
197 {
198         return readl_relaxed(hdev->io_base + offset);
199 }
200
201 static inline void stm32_hash_write(struct stm32_hash_dev *hdev,
202                                     u32 offset, u32 value)
203 {
204         writel_relaxed(value, hdev->io_base + offset);
205 }
206
/*
 * Poll HASH_SR until the BUSY flag clears.
 * Returns 0 on success or -ETIMEDOUT after 10 ms (polling every 10 us).
 */
static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
{
	u32 status;

	return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status,
				   !(status & HASH_SR_BUSY), 10, 10000);
}
214
215 static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length)
216 {
217         u32 reg;
218
219         reg = stm32_hash_read(hdev, HASH_STR);
220         reg &= ~(HASH_STR_NBLW_MASK);
221         reg |= (8U * ((length) % 4U));
222         stm32_hash_write(hdev, HASH_STR, reg);
223 }
224
225 static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
226 {
227         struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
228         struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
229         u32 reg;
230         int keylen = ctx->keylen;
231         void *key = ctx->key;
232
233         if (keylen) {
234                 stm32_hash_set_nblw(hdev, keylen);
235
236                 while (keylen > 0) {
237                         stm32_hash_write(hdev, HASH_DIN, *(u32 *)key);
238                         keylen -= 4;
239                         key += 4;
240                 }
241
242                 reg = stm32_hash_read(hdev, HASH_STR);
243                 reg |= HASH_STR_DCAL;
244                 stm32_hash_write(hdev, HASH_STR, reg);
245
246                 return -EINPROGRESS;
247         }
248
249         return 0;
250 }
251
252 static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev)
253 {
254         struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
255         struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
256         struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
257
258         u32 reg = HASH_CR_INIT;
259
260         if (!(hdev->flags & HASH_FLAGS_INIT)) {
261                 switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
262                 case HASH_FLAGS_MD5:
263                         reg |= HASH_CR_ALGO_MD5;
264                         break;
265                 case HASH_FLAGS_SHA1:
266                         reg |= HASH_CR_ALGO_SHA1;
267                         break;
268                 case HASH_FLAGS_SHA224:
269                         reg |= HASH_CR_ALGO_SHA224;
270                         break;
271                 case HASH_FLAGS_SHA256:
272                         reg |= HASH_CR_ALGO_SHA256;
273                         break;
274                 default:
275                         reg |= HASH_CR_ALGO_MD5;
276                 }
277
278                 reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);
279
280                 if (rctx->flags & HASH_FLAGS_HMAC) {
281                         hdev->flags |= HASH_FLAGS_HMAC;
282                         reg |= HASH_CR_MODE;
283                         if (ctx->keylen > HASH_LONG_KEY)
284                                 reg |= HASH_CR_LKEY;
285                 }
286
287                 stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);
288
289                 stm32_hash_write(hdev, HASH_CR, reg);
290
291                 hdev->flags |= HASH_FLAGS_INIT;
292
293                 dev_dbg(hdev->dev, "Write Control %x\n", reg);
294         }
295 }
296
/*
 * Copy as many request bytes as fit from the source scatterlist into the
 * internal staging buffer (rctx->buffer), advancing rctx->offset and
 * decrementing rctx->total. Stops when the buffer is full or the data is
 * exhausted.
 */
static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
{
	size_t count;

	while ((rctx->bufcnt < rctx->buflen) && rctx->total) {
		count = min(rctx->sg->length - rctx->offset, rctx->total);
		count = min(count, rctx->buflen - rctx->bufcnt);

		/* count is size_t, so "<= 0" can only mean "== 0" here */
		if (count <= 0) {
			/* Skip zero-length entries in the middle of the list */
			if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
				rctx->sg = sg_next(rctx->sg);
				continue;
			} else {
				break;
			}
		}

		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, rctx->sg,
					 rctx->offset, count, 0);

		rctx->bufcnt += count;
		rctx->offset += count;
		rctx->total -= count;

		/* Current entry fully consumed: advance to the next one */
		if (rctx->offset == rctx->sg->length) {
			rctx->sg = sg_next(rctx->sg);
			if (rctx->sg)
				rctx->offset = 0;
			else
				rctx->total = 0;
		}
	}
}
330
/*
 * Feed @length bytes from @buf into the HASH data FIFO by CPU (PIO).
 * For HMAC, the inner key is pushed first (once per request, tracked by
 * HASH_FLAGS_HMAC_KEY). When @final is set, DCAL is raised to start the
 * digest computation and, for HMAC, the key is written again for the
 * outer hash stage.
 *
 * Returns 0 for an intermediate chunk, -EINPROGRESS when the final digest
 * was kicked off (completion is reported asynchronously), or -ETIMEDOUT.
 */
static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
			       const u8 *buf, size_t length, int final)
{
	unsigned int count, len32;
	const u32 *buffer = (const u32 *)buf;
	u32 reg;

	if (final)
		hdev->flags |= HASH_FLAGS_FINAL;

	/* Number of full 32-bit words needed to carry @length bytes */
	len32 = DIV_ROUND_UP(length, sizeof(u32));

	dev_dbg(hdev->dev, "%s: length: %zd, final: %x len32 %i\n",
		__func__, length, final, len32);

	hdev->flags |= HASH_FLAGS_CPU;

	stm32_hash_write_ctrl(hdev);

	if (stm32_hash_wait_busy(hdev))
		return -ETIMEDOUT;

	/* HMAC: the inner key must be loaded before any message data */
	if ((hdev->flags & HASH_FLAGS_HMAC) &&
	    (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) {
		hdev->flags |= HASH_FLAGS_HMAC_KEY;
		stm32_hash_write_key(hdev);
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
	}

	for (count = 0; count < len32; count++)
		stm32_hash_write(hdev, HASH_DIN, buffer[count]);

	if (final) {
		stm32_hash_set_nblw(hdev, length);
		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);
		/* HMAC outer stage: write the key once more */
		if (hdev->flags & HASH_FLAGS_HMAC) {
			if (stm32_hash_wait_busy(hdev))
				return -ETIMEDOUT;
			stm32_hash_write_key(hdev);
		}
		return -EINPROGRESS;
	}

	return 0;
}
379
380 static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
381 {
382         struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
383         int bufcnt, err = 0, final;
384
385         dev_dbg(hdev->dev, "%s flags %lx\n", __func__, rctx->flags);
386
387         final = (rctx->flags & HASH_FLAGS_FINUP);
388
389         while ((rctx->total >= rctx->buflen) ||
390                (rctx->bufcnt + rctx->total >= rctx->buflen)) {
391                 stm32_hash_append_sg(rctx);
392                 bufcnt = rctx->bufcnt;
393                 rctx->bufcnt = 0;
394                 err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, 0);
395         }
396
397         stm32_hash_append_sg(rctx);
398
399         if (final) {
400                 bufcnt = rctx->bufcnt;
401                 rctx->bufcnt = 0;
402                 err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt,
403                                           (rctx->flags & HASH_FLAGS_FINUP));
404         }
405
406         return err;
407 }
408
/*
 * Submit one scatterlist entry to the DMA engine, targeting HASH_DIN, and
 * wait (up to 100 ms) for its completion. @mdma selects HASH_CR_MDMAT,
 * which the caller sets for every chunk except the last one.
 *
 * Returns -EINPROGRESS on success, -ENOMEM on prep/submit failure or
 * -ETIMEDOUT when the transfer did not complete.
 */
static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
			       struct scatterlist *sg, int length, int mdma)
{
	struct dma_async_tx_descriptor *in_desc;
	dma_cookie_t cookie;
	u32 reg;
	int err;

	in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1,
					  DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT |
					  DMA_CTRL_ACK);
	if (!in_desc) {
		dev_err(hdev->dev, "dmaengine_prep_slave error\n");
		return -ENOMEM;
	}

	reinit_completion(&hdev->dma_completion);
	in_desc->callback = stm32_hash_dma_callback;
	in_desc->callback_param = hdev;

	hdev->flags |= HASH_FLAGS_FINAL;
	hdev->flags |= HASH_FLAGS_DMA_ACTIVE;

	reg = stm32_hash_read(hdev, HASH_CR);

	if (mdma)
		reg |= HASH_CR_MDMAT;
	else
		reg &= ~HASH_CR_MDMAT;

	/* Enable DMA requests from the HASH IP */
	reg |= HASH_CR_DMAE;

	stm32_hash_write(hdev, HASH_CR, reg);

	stm32_hash_set_nblw(hdev, length);

	cookie = dmaengine_submit(in_desc);
	err = dma_submit_error(cookie);
	if (err)
		return -ENOMEM;

	dma_async_issue_pending(hdev->dma_lch);

	if (!wait_for_completion_timeout(&hdev->dma_completion,
					 msecs_to_jiffies(100)))
		err = -ETIMEDOUT;

	/* Double-check the engine's view of the transfer status */
	if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
				     NULL, NULL) != DMA_COMPLETE)
		err = -ETIMEDOUT;

	if (err) {
		dev_err(hdev->dev, "DMA Error %i\n", err);
		dmaengine_terminate_all(hdev->dma_lch);
		return err;
	}

	return -EINPROGRESS;
}
468
/* DMA completion callback: wakes the waiter in stm32_hash_xmit_dma(). */
static void stm32_hash_dma_callback(void *param)
{
	struct stm32_hash_dev *hdev = param;

	complete(&hdev->dma_completion);

	/*
	 * NOTE(review): the flag is set after complete(), so the woken
	 * waiter may run before HASH_FLAGS_DMA_READY is visible — confirm
	 * this ordering is intended.
	 */
	hdev->flags |= HASH_FLAGS_DMA_READY;
}
477
/*
 * Load the HMAC key while on the DMA path. Short keys (< HASH_DMA_THRESHOLD)
 * and dma_mode == 1 use CPU writes; otherwise the key is sent as a single
 * DMA transfer through a one-entry scatterlist.
 *
 * Returns -EINPROGRESS on success (mirroring stm32_hash_write_key() /
 * stm32_hash_xmit_dma()), or a negative error code.
 */
static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	int err;

	if (ctx->keylen < HASH_DMA_THRESHOLD || (hdev->dma_mode == 1)) {
		err = stm32_hash_write_key(hdev);
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
	} else {
		/* Key length is rounded up to a whole number of words */
		if (!(hdev->flags & HASH_FLAGS_HMAC_KEY))
			sg_init_one(&rctx->sg_key, ctx->key,
				    ALIGN(ctx->keylen, sizeof(u32)));

		rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1,
					  DMA_TO_DEVICE);
		if (rctx->dma_ct == 0) {
			dev_err(hdev->dev, "dma_map_sg error\n");
			return -ENOMEM;
		}

		err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0);

		dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE);
	}

	return err;
}
508
509 static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
510 {
511         struct dma_slave_config dma_conf;
512         struct dma_chan *chan;
513         int err;
514
515         memset(&dma_conf, 0, sizeof(dma_conf));
516
517         dma_conf.direction = DMA_MEM_TO_DEV;
518         dma_conf.dst_addr = hdev->phys_base + HASH_DIN;
519         dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
520         dma_conf.src_maxburst = hdev->dma_maxburst;
521         dma_conf.dst_maxburst = hdev->dma_maxburst;
522         dma_conf.device_fc = false;
523
524         chan = dma_request_chan(hdev->dev, "in");
525         if (IS_ERR(chan))
526                 return PTR_ERR(chan);
527
528         hdev->dma_lch = chan;
529
530         err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
531         if (err) {
532                 dma_release_channel(hdev->dma_lch);
533                 hdev->dma_lch = NULL;
534                 dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
535                 return err;
536         }
537
538         init_completion(&hdev->dma_completion);
539
540         return 0;
541 }
542
543 static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
544 {
545         struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
546         struct scatterlist sg[1], *tsg;
547         int err = 0, len = 0, reg, ncp = 0;
548         unsigned int i;
549         u32 *buffer = (void *)rctx->buffer;
550
551         rctx->sg = hdev->req->src;
552         rctx->total = hdev->req->nbytes;
553
554         rctx->nents = sg_nents(rctx->sg);
555
556         if (rctx->nents < 0)
557                 return -EINVAL;
558
559         stm32_hash_write_ctrl(hdev);
560
561         if (hdev->flags & HASH_FLAGS_HMAC) {
562                 err = stm32_hash_hmac_dma_send(hdev);
563                 if (err != -EINPROGRESS)
564                         return err;
565         }
566
567         for_each_sg(rctx->sg, tsg, rctx->nents, i) {
568                 len = sg->length;
569
570                 sg[0] = *tsg;
571                 if (sg_is_last(sg)) {
572                         if (hdev->dma_mode == 1) {
573                                 len = (ALIGN(sg->length, 16) - 16);
574
575                                 ncp = sg_pcopy_to_buffer(
576                                         rctx->sg, rctx->nents,
577                                         rctx->buffer, sg->length - len,
578                                         rctx->total - sg->length + len);
579
580                                 sg->length = len;
581                         } else {
582                                 if (!(IS_ALIGNED(sg->length, sizeof(u32)))) {
583                                         len = sg->length;
584                                         sg->length = ALIGN(sg->length,
585                                                            sizeof(u32));
586                                 }
587                         }
588                 }
589
590                 rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1,
591                                           DMA_TO_DEVICE);
592                 if (rctx->dma_ct == 0) {
593                         dev_err(hdev->dev, "dma_map_sg error\n");
594                         return -ENOMEM;
595                 }
596
597                 err = stm32_hash_xmit_dma(hdev, sg, len,
598                                           !sg_is_last(sg));
599
600                 dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
601
602                 if (err == -ENOMEM)
603                         return err;
604         }
605
606         if (hdev->dma_mode == 1) {
607                 if (stm32_hash_wait_busy(hdev))
608                         return -ETIMEDOUT;
609                 reg = stm32_hash_read(hdev, HASH_CR);
610                 reg &= ~HASH_CR_DMAE;
611                 reg |= HASH_CR_DMAA;
612                 stm32_hash_write(hdev, HASH_CR, reg);
613
614                 if (ncp) {
615                         memset(buffer + ncp, 0,
616                                DIV_ROUND_UP(ncp, sizeof(u32)) - ncp);
617                         writesl(hdev->io_base + HASH_DIN, buffer,
618                                 DIV_ROUND_UP(ncp, sizeof(u32)));
619                 }
620                 stm32_hash_set_nblw(hdev, ncp);
621                 reg = stm32_hash_read(hdev, HASH_STR);
622                 reg |= HASH_STR_DCAL;
623                 stm32_hash_write(hdev, HASH_STR, reg);
624                 err = -EINPROGRESS;
625         }
626
627         if (hdev->flags & HASH_FLAGS_HMAC) {
628                 if (stm32_hash_wait_busy(hdev))
629                         return -ETIMEDOUT;
630                 err = stm32_hash_hmac_dma_send(hdev);
631         }
632
633         return err;
634 }
635
636 static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
637 {
638         struct stm32_hash_dev *hdev = NULL, *tmp;
639
640         spin_lock_bh(&stm32_hash.lock);
641         if (!ctx->hdev) {
642                 list_for_each_entry(tmp, &stm32_hash.dev_list, list) {
643                         hdev = tmp;
644                         break;
645                 }
646                 ctx->hdev = hdev;
647         } else {
648                 hdev = ctx->hdev;
649         }
650
651         spin_unlock_bh(&stm32_hash.lock);
652
653         return hdev;
654 }
655
656 static bool stm32_hash_dma_aligned_data(struct ahash_request *req)
657 {
658         struct scatterlist *sg;
659         struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
660         struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
661         int i;
662
663         if (req->nbytes <= HASH_DMA_THRESHOLD)
664                 return false;
665
666         if (sg_nents(req->src) > 1) {
667                 if (hdev->dma_mode == 1)
668                         return false;
669                 for_each_sg(req->src, sg, sg_nents(req->src), i) {
670                         if ((!IS_ALIGNED(sg->length, sizeof(u32))) &&
671                             (!sg_is_last(sg)))
672                                 return false;
673                 }
674         }
675
676         if (req->src->offset % 4)
677                 return false;
678
679         return true;
680 }
681
682 static int stm32_hash_init(struct ahash_request *req)
683 {
684         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
685         struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
686         struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
687         struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
688
689         rctx->hdev = hdev;
690
691         rctx->flags = HASH_FLAGS_CPU;
692
693         rctx->digcnt = crypto_ahash_digestsize(tfm);
694         switch (rctx->digcnt) {
695         case MD5_DIGEST_SIZE:
696                 rctx->flags |= HASH_FLAGS_MD5;
697                 break;
698         case SHA1_DIGEST_SIZE:
699                 rctx->flags |= HASH_FLAGS_SHA1;
700                 break;
701         case SHA224_DIGEST_SIZE:
702                 rctx->flags |= HASH_FLAGS_SHA224;
703                 break;
704         case SHA256_DIGEST_SIZE:
705                 rctx->flags |= HASH_FLAGS_SHA256;
706                 break;
707         default:
708                 return -EINVAL;
709         }
710
711         rctx->bufcnt = 0;
712         rctx->buflen = HASH_BUFLEN;
713         rctx->total = 0;
714         rctx->offset = 0;
715         rctx->data_type = HASH_DATA_8_BITS;
716
717         memset(rctx->buffer, 0, HASH_BUFLEN);
718
719         if (ctx->flags & HASH_FLAGS_HMAC)
720                 rctx->flags |= HASH_FLAGS_HMAC;
721
722         dev_dbg(hdev->dev, "%s Flags %lx\n", __func__, rctx->flags);
723
724         return 0;
725 }
726
/*
 * Engine "update" operation. Only CPU-path requests are ever enqueued as
 * updates (stm32_hash_update() returns early otherwise), so delegate to
 * the CPU handler.
 */
static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
{
	return stm32_hash_update_cpu(hdev);
}
731
732 static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
733 {
734         struct ahash_request *req = hdev->req;
735         struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
736         int err;
737         int buflen = rctx->bufcnt;
738
739         rctx->bufcnt = 0;
740
741         if (!(rctx->flags & HASH_FLAGS_CPU))
742                 err = stm32_hash_dma_send(hdev);
743         else
744                 err = stm32_hash_xmit_cpu(hdev, rctx->buffer, buflen, 1);
745
746
747         return err;
748 }
749
750 static void stm32_hash_copy_hash(struct ahash_request *req)
751 {
752         struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
753         __be32 *hash = (void *)rctx->digest;
754         unsigned int i, hashsize;
755
756         switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
757         case HASH_FLAGS_MD5:
758                 hashsize = MD5_DIGEST_SIZE;
759                 break;
760         case HASH_FLAGS_SHA1:
761                 hashsize = SHA1_DIGEST_SIZE;
762                 break;
763         case HASH_FLAGS_SHA224:
764                 hashsize = SHA224_DIGEST_SIZE;
765                 break;
766         case HASH_FLAGS_SHA256:
767                 hashsize = SHA256_DIGEST_SIZE;
768                 break;
769         default:
770                 return;
771         }
772
773         for (i = 0; i < hashsize / sizeof(u32); i++)
774                 hash[i] = cpu_to_be32(stm32_hash_read(rctx->hdev,
775                                                       HASH_HREG(i)));
776 }
777
778 static int stm32_hash_finish(struct ahash_request *req)
779 {
780         struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
781
782         if (!req->result)
783                 return -EINVAL;
784
785         memcpy(req->result, rctx->digest, rctx->digcnt);
786
787         return 0;
788 }
789
/*
 * Terminate a request: on success of a FINAL operation, read the digest
 * back and clear the per-device state flags for the next request; on
 * failure, flag the request context with HASH_FLAGS_ERRORS. Then drop the
 * runtime-PM reference and hand the result to the crypto engine.
 */
static void stm32_hash_finish_req(struct ahash_request *req, int err)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = rctx->hdev;

	if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
		stm32_hash_copy_hash(req);
		err = stm32_hash_finish(req);
		/* Reset all device-side state for the next request */
		hdev->flags &= ~(HASH_FLAGS_FINAL | HASH_FLAGS_CPU |
				 HASH_FLAGS_INIT | HASH_FLAGS_DMA_READY |
				 HASH_FLAGS_OUTPUT_READY | HASH_FLAGS_HMAC |
				 HASH_FLAGS_HMAC_INIT | HASH_FLAGS_HMAC_FINAL |
				 HASH_FLAGS_HMAC_KEY);
	} else {
		rctx->flags |= HASH_FLAGS_ERRORS;
	}

	pm_runtime_mark_last_busy(hdev->dev);
	pm_runtime_put_autosuspend(hdev->dev);

	crypto_finalize_hash_request(hdev->engine, req, err);
}
812
813 static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
814                               struct stm32_hash_request_ctx *rctx)
815 {
816         pm_runtime_get_sync(hdev->dev);
817
818         if (!(HASH_FLAGS_INIT & hdev->flags)) {
819                 stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
820                 stm32_hash_write(hdev, HASH_STR, 0);
821                 stm32_hash_write(hdev, HASH_DIN, 0);
822                 stm32_hash_write(hdev, HASH_IMR, 0);
823                 hdev->err = 0;
824         }
825
826         return 0;
827 }
828
829 static int stm32_hash_one_request(struct crypto_engine *engine, void *areq);
830 static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq);
831
/*
 * Hand @req to the crypto engine queue; the engine later invokes
 * stm32_hash_prepare_req() and stm32_hash_one_request().
 */
static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
				   struct ahash_request *req)
{
	return crypto_transfer_hash_request_to_engine(hdev->engine, req);
}
837
838 static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq)
839 {
840         struct ahash_request *req = container_of(areq, struct ahash_request,
841                                                  base);
842         struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
843         struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
844         struct stm32_hash_request_ctx *rctx;
845
846         if (!hdev)
847                 return -ENODEV;
848
849         hdev->req = req;
850
851         rctx = ahash_request_ctx(req);
852
853         dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n",
854                 rctx->op, req->nbytes);
855
856         return stm32_hash_hw_init(hdev, rctx);
857 }
858
859 static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
860 {
861         struct ahash_request *req = container_of(areq, struct ahash_request,
862                                                  base);
863         struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
864         struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
865         struct stm32_hash_request_ctx *rctx;
866         int err = 0;
867
868         if (!hdev)
869                 return -ENODEV;
870
871         hdev->req = req;
872
873         rctx = ahash_request_ctx(req);
874
875         if (rctx->op == HASH_OP_UPDATE)
876                 err = stm32_hash_update_req(hdev);
877         else if (rctx->op == HASH_OP_FINAL)
878                 err = stm32_hash_final_req(hdev);
879
880         if (err != -EINPROGRESS)
881         /* done task will not finish it, so do it here */
882                 stm32_hash_finish_req(req, err);
883
884         return 0;
885 }
886
887 static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
888 {
889         struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
890         struct stm32_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
891         struct stm32_hash_dev *hdev = ctx->hdev;
892
893         rctx->op = op;
894
895         return stm32_hash_handle_queue(hdev, req);
896 }
897
898 static int stm32_hash_update(struct ahash_request *req)
899 {
900         struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
901
902         if (!req->nbytes || !(rctx->flags & HASH_FLAGS_CPU))
903                 return 0;
904
905         rctx->total = req->nbytes;
906         rctx->sg = req->src;
907         rctx->offset = 0;
908
909         if ((rctx->bufcnt + rctx->total < rctx->buflen)) {
910                 stm32_hash_append_sg(rctx);
911                 return 0;
912         }
913
914         return stm32_hash_enqueue(req, HASH_OP_UPDATE);
915 }
916
917 static int stm32_hash_final(struct ahash_request *req)
918 {
919         struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
920
921         rctx->flags |= HASH_FLAGS_FINUP;
922
923         return stm32_hash_enqueue(req, HASH_OP_FINAL);
924 }
925
926 static int stm32_hash_finup(struct ahash_request *req)
927 {
928         struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
929         struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
930         struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
931         int err1, err2;
932
933         rctx->flags |= HASH_FLAGS_FINUP;
934
935         if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
936                 rctx->flags &= ~HASH_FLAGS_CPU;
937
938         err1 = stm32_hash_update(req);
939
940         if (err1 == -EINPROGRESS || err1 == -EBUSY)
941                 return err1;
942
943         /*
944          * final() has to be always called to cleanup resources
945          * even if update() failed, except EINPROGRESS
946          */
947         err2 = stm32_hash_final(req);
948
949         return err1 ?: err2;
950 }
951
/* .digest entry point: one-shot init followed by finup. */
static int stm32_hash_digest(struct ahash_request *req)
{
	int ret = stm32_hash_init(req);

	return ret ? ret : stm32_hash_finup(req);
}
956
957 static int stm32_hash_export(struct ahash_request *req, void *out)
958 {
959         struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
960         struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
961         struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
962         u32 *preg;
963         unsigned int i;
964
965         pm_runtime_get_sync(hdev->dev);
966
967         while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY))
968                 cpu_relax();
969
970         rctx->hw_context = kmalloc_array(3 + HASH_CSR_REGISTER_NUMBER,
971                                          sizeof(u32),
972                                          GFP_KERNEL);
973
974         preg = rctx->hw_context;
975
976         *preg++ = stm32_hash_read(hdev, HASH_IMR);
977         *preg++ = stm32_hash_read(hdev, HASH_STR);
978         *preg++ = stm32_hash_read(hdev, HASH_CR);
979         for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
980                 *preg++ = stm32_hash_read(hdev, HASH_CSR(i));
981
982         pm_runtime_mark_last_busy(hdev->dev);
983         pm_runtime_put_autosuspend(hdev->dev);
984
985         memcpy(out, rctx, sizeof(*rctx));
986
987         return 0;
988 }
989
/*
 * .import entry point: restore a context previously saved by
 * stm32_hash_export(). The software request context is copied back first
 * (including the hw_context pointer), then the saved IMR/STR/CR and CSR
 * bank values are replayed into the peripheral in that exact order.
 */
static int stm32_hash_import(struct ahash_request *req, const void *in)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	const u32 *preg = in;
	u32 reg;
	unsigned int i;

	memcpy(rctx, in, sizeof(*rctx));

	/* Walk the snapshot buffer allocated by export(). */
	preg = rctx->hw_context;

	pm_runtime_get_sync(hdev->dev);

	stm32_hash_write(hdev, HASH_IMR, *preg++);
	stm32_hash_write(hdev, HASH_STR, *preg++);
	stm32_hash_write(hdev, HASH_CR, *preg);
	/*
	 * NOTE(review): CR is deliberately written twice — first with the
	 * saved value, then again with HASH_CR_INIT set, presumably to
	 * restart the core with the restored configuration. Confirm against
	 * the STM32 reference manual before changing this sequence.
	 */
	reg = *preg++ | HASH_CR_INIT;
	stm32_hash_write(hdev, HASH_CR, reg);

	for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
		stm32_hash_write(hdev, HASH_CSR(i), *preg++);

	pm_runtime_mark_last_busy(hdev->dev);
	pm_runtime_put_autosuspend(hdev->dev);

	/* The snapshot buffer is single-use: release it once replayed. */
	kfree(rctx->hw_context);

	return 0;
}
1021
1022 static int stm32_hash_setkey(struct crypto_ahash *tfm,
1023                              const u8 *key, unsigned int keylen)
1024 {
1025         struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1026
1027         if (keylen <= HASH_MAX_KEY_SIZE) {
1028                 memcpy(ctx->key, key, keylen);
1029                 ctx->keylen = keylen;
1030         } else {
1031                 return -ENOMEM;
1032         }
1033
1034         return 0;
1035 }
1036
1037 static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
1038                                     const char *algs_hmac_name)
1039 {
1040         struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1041
1042         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1043                                  sizeof(struct stm32_hash_request_ctx));
1044
1045         ctx->keylen = 0;
1046
1047         if (algs_hmac_name)
1048                 ctx->flags |= HASH_FLAGS_HMAC;
1049
1050         ctx->enginectx.op.do_one_request = stm32_hash_one_request;
1051         ctx->enginectx.op.prepare_request = stm32_hash_prepare_req;
1052         ctx->enginectx.op.unprepare_request = NULL;
1053         return 0;
1054 }
1055
/* tfm init for the plain (non-HMAC) algorithms. */
static int stm32_hash_cra_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, NULL);
}

/* tfm init for hmac(md5). */
static int stm32_hash_cra_md5_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "md5");
}

/* tfm init for hmac(sha1). */
static int stm32_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha1");
}

/* tfm init for hmac(sha224). */
static int stm32_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha224");
}

/* tfm init for hmac(sha256). */
static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha256");
}
1080
1081 static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
1082 {
1083         struct stm32_hash_dev *hdev = dev_id;
1084
1085         if (HASH_FLAGS_CPU & hdev->flags) {
1086                 if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
1087                         hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
1088                         goto finish;
1089                 }
1090         } else if (HASH_FLAGS_DMA_READY & hdev->flags) {
1091                 if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) {
1092                         hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE;
1093                                 goto finish;
1094                 }
1095         }
1096
1097         return IRQ_HANDLED;
1098
1099 finish:
1100         /* Finish current request */
1101         stm32_hash_finish_req(hdev->req, 0);
1102
1103         return IRQ_HANDLED;
1104 }
1105
1106 static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
1107 {
1108         struct stm32_hash_dev *hdev = dev_id;
1109         u32 reg;
1110
1111         reg = stm32_hash_read(hdev, HASH_SR);
1112         if (reg & HASH_SR_OUTPUT_READY) {
1113                 reg &= ~HASH_SR_OUTPUT_READY;
1114                 stm32_hash_write(hdev, HASH_SR, reg);
1115                 hdev->flags |= HASH_FLAGS_OUTPUT_READY;
1116                 /* Disable IT*/
1117                 stm32_hash_write(hdev, HASH_IMR, 0);
1118                 return IRQ_WAKE_THREAD;
1119         }
1120
1121         return IRQ_NONE;
1122 }
1123
/*
 * ahash algorithms available on every supported SoC: MD5, SHA-1 and
 * their HMAC variants. All entries share the same stm32_hash_* entry
 * points; only digest/block size, the setkey hook (HMAC only) and the
 * tfm-init hook (which selects HMAC mode) differ.
 */
static struct ahash_alg algs_md5_sha1[] = {
	{	/* md5 */
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "stm32-md5",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{	/* hmac(md5) */
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "stm32-hmac-md5",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_md5_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{	/* sha1 */
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "stm32-sha1",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{	/* hmac(sha1) */
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "stm32-hmac-sha1",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha1_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
};
1228
/*
 * SHA-224/SHA-256 (and HMAC variants) — only registered on SoCs whose
 * platform data includes this table (STM32F7 and later).
 */
static struct ahash_alg algs_sha224_sha256[] = {
	{	/* sha224 */
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "stm32-sha224",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{	/* hmac(sha224) */
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.setkey = stm32_hash_setkey,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "stm32-hmac-sha224",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha224_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{	/* sha256 */
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "stm32-sha256",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{	/* hmac(sha256) */
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "stm32-hmac-sha256",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha256_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
};
1333
1334 static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
1335 {
1336         unsigned int i, j;
1337         int err;
1338
1339         for (i = 0; i < hdev->pdata->algs_info_size; i++) {
1340                 for (j = 0; j < hdev->pdata->algs_info[i].size; j++) {
1341                         err = crypto_register_ahash(
1342                                 &hdev->pdata->algs_info[i].algs_list[j]);
1343                         if (err)
1344                                 goto err_algs;
1345                 }
1346         }
1347
1348         return 0;
1349 err_algs:
1350         dev_err(hdev->dev, "Algo %d : %d failed\n", i, j);
1351         for (; i--; ) {
1352                 for (; j--;)
1353                         crypto_unregister_ahash(
1354                                 &hdev->pdata->algs_info[i].algs_list[j]);
1355         }
1356
1357         return err;
1358 }
1359
1360 static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev)
1361 {
1362         unsigned int i, j;
1363
1364         for (i = 0; i < hdev->pdata->algs_info_size; i++) {
1365                 for (j = 0; j < hdev->pdata->algs_info[i].size; j++)
1366                         crypto_unregister_ahash(
1367                                 &hdev->pdata->algs_info[i].algs_list[j]);
1368         }
1369
1370         return 0;
1371 }
1372
/* STM32F4-class HASH IP: MD5/SHA-1 (+ HMAC) only. */
static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = {
	{
		.algs_list	= algs_md5_sha1,
		.size		= ARRAY_SIZE(algs_md5_sha1),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = {
	.algs_info	= stm32_hash_algs_info_stm32f4,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f4),
};

/* STM32F7-class HASH IP: adds SHA-224/SHA-256 (+ HMAC) to the F4 set. */
static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = {
	{
		.algs_list	= algs_md5_sha1,
		.size		= ARRAY_SIZE(algs_md5_sha1),
	},
	{
		.algs_list	= algs_sha224_sha256,
		.size		= ARRAY_SIZE(algs_sha224_sha256),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = {
	.algs_info	= stm32_hash_algs_info_stm32f7,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f7),
};
1400
/* DT match table: selects the per-SoC algorithm set via .data. */
static const struct of_device_id stm32_hash_of_match[] = {
	{
		.compatible = "st,stm32f456-hash",
		.data = &stm32_hash_pdata_stm32f4,
	},
	{
		.compatible = "st,stm32f756-hash",
		.data = &stm32_hash_pdata_stm32f7,
	},
	{},
};

MODULE_DEVICE_TABLE(of, stm32_hash_of_match);
1414
1415 static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
1416                                    struct device *dev)
1417 {
1418         hdev->pdata = of_device_get_match_data(dev);
1419         if (!hdev->pdata) {
1420                 dev_err(dev, "no compatible OF match\n");
1421                 return -EINVAL;
1422         }
1423
1424         if (of_property_read_u32(dev->of_node, "dma-maxburst",
1425                                  &hdev->dma_maxburst)) {
1426                 dev_info(dev, "dma-maxburst not specified, using 0\n");
1427                 hdev->dma_maxburst = 0;
1428         }
1429
1430         return 0;
1431 }
1432
/*
 * Probe: map the HASH controller, wire up its IRQ, clock and optional
 * reset/DMA resources, then register the crypto engine and the
 * supported algorithms. Runtime PM is set up with autosuspend; the
 * get_noresume/set_active pair keeps the device powered until the final
 * pm_runtime_put_sync() of a successful probe. The error labels unwind
 * in strict reverse order of acquisition.
 */
static int stm32_hash_probe(struct platform_device *pdev)
{
	struct stm32_hash_dev *hdev;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret, irq;

	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hdev->io_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(hdev->io_base))
		return PTR_ERR(hdev->io_base);

	/* Physical base is needed later for DMA slave configuration. */
	hdev->phys_base = res->start;

	ret = stm32_hash_get_of_match(hdev, dev);
	if (ret)
		return ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_threaded_irq(dev, irq, stm32_hash_irq_handler,
					stm32_hash_irq_thread, IRQF_ONESHOT,
					dev_name(dev), hdev);
	if (ret) {
		dev_err(dev, "Cannot grab IRQ\n");
		return ret;
	}

	hdev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(hdev->clk))
		return dev_err_probe(dev, PTR_ERR(hdev->clk),
				     "failed to get clock for hash\n");

	ret = clk_prepare_enable(hdev->clk);
	if (ret) {
		dev_err(dev, "failed to enable hash clock (%d)\n", ret);
		return ret;
	}

	pm_runtime_set_autosuspend_delay(dev, HASH_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);

	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	/* Reset is optional: only a deferred provider aborts the probe. */
	hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(hdev->rst)) {
		if (PTR_ERR(hdev->rst) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto err_reset;
		}
	} else {
		reset_control_assert(hdev->rst);
		udelay(2);
		reset_control_deassert(hdev->rst);
	}

	hdev->dev = dev;

	platform_set_drvdata(pdev, hdev);

	/* DMA is optional too: -ENOENT simply means CPU-only operation. */
	ret = stm32_hash_dma_init(hdev);
	switch (ret) {
	case 0:
		break;
	case -ENOENT:
		dev_dbg(dev, "DMA mode not available\n");
		break;
	default:
		goto err_dma;
	}

	spin_lock(&stm32_hash.lock);
	list_add_tail(&hdev->list, &stm32_hash.dev_list);
	spin_unlock(&stm32_hash.lock);

	/* Initialize crypto engine */
	hdev->engine = crypto_engine_alloc_init(dev, 1);
	if (!hdev->engine) {
		ret = -ENOMEM;
		goto err_engine;
	}

	ret = crypto_engine_start(hdev->engine);
	if (ret)
		goto err_engine_start;

	hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR);

	/* Register algos */
	ret = stm32_hash_register_algs(hdev);
	if (ret)
		goto err_algs;

	dev_info(dev, "Init HASH done HW ver %x DMA mode %u\n",
		 stm32_hash_read(hdev, HASH_VER), hdev->dma_mode);

	/* Drop the initial PM reference; autosuspend takes over from here. */
	pm_runtime_put_sync(dev);

	return 0;

err_algs:
err_engine_start:
	crypto_engine_exit(hdev->engine);
err_engine:
	spin_lock(&stm32_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&stm32_hash.lock);
err_dma:
	if (hdev->dma_lch)
		dma_release_channel(hdev->dma_lch);
err_reset:
	pm_runtime_disable(dev);
	pm_runtime_put_noidle(dev);

	clk_disable_unprepare(hdev->clk);

	return ret;
}
1559
1560 static int stm32_hash_remove(struct platform_device *pdev)
1561 {
1562         struct stm32_hash_dev *hdev;
1563         int ret;
1564
1565         hdev = platform_get_drvdata(pdev);
1566         if (!hdev)
1567                 return -ENODEV;
1568
1569         ret = pm_runtime_resume_and_get(hdev->dev);
1570         if (ret < 0)
1571                 return ret;
1572
1573         stm32_hash_unregister_algs(hdev);
1574
1575         crypto_engine_exit(hdev->engine);
1576
1577         spin_lock(&stm32_hash.lock);
1578         list_del(&hdev->list);
1579         spin_unlock(&stm32_hash.lock);
1580
1581         if (hdev->dma_lch)
1582                 dma_release_channel(hdev->dma_lch);
1583
1584         pm_runtime_disable(hdev->dev);
1585         pm_runtime_put_noidle(hdev->dev);
1586
1587         clk_disable_unprepare(hdev->clk);
1588
1589         return 0;
1590 }
1591
1592 #ifdef CONFIG_PM
1593 static int stm32_hash_runtime_suspend(struct device *dev)
1594 {
1595         struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
1596
1597         clk_disable_unprepare(hdev->clk);
1598
1599         return 0;
1600 }
1601
1602 static int stm32_hash_runtime_resume(struct device *dev)
1603 {
1604         struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
1605         int ret;
1606
1607         ret = clk_prepare_enable(hdev->clk);
1608         if (ret) {
1609                 dev_err(hdev->dev, "Failed to prepare_enable clock\n");
1610                 return ret;
1611         }
1612
1613         return 0;
1614 }
1615 #endif
1616
/* System sleep reuses the runtime-PM hooks via force_suspend/force_resume. */
static const struct dev_pm_ops stm32_hash_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(stm32_hash_runtime_suspend,
			   stm32_hash_runtime_resume, NULL)
};
1623
/* Platform driver glue: probe/remove plus runtime and sleep PM ops. */
static struct platform_driver stm32_hash_driver = {
	.probe		= stm32_hash_probe,
	.remove		= stm32_hash_remove,
	.driver		= {
		.name	= "stm32-hash",
		.pm = &stm32_hash_pm_ops,
		.of_match_table = stm32_hash_of_match,
	}
};

module_platform_driver(stm32_hash_driver);

MODULE_DESCRIPTION("STM32 SHA1/224/256 & MD5 (HMAC) hw accelerator driver");
MODULE_AUTHOR("Lionel Debieve <lionel.debieve@st.com>");
MODULE_LICENSE("GPL v2");