crypto/ahash.c [linux-2.6-microblaze.git]
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

static const struct crypto_type crypto_ahash_type;

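/*
 * Private state used by the result-alignment fixup path below: a copy of
 * the caller's completion callback, callback data, result pointer and
 * request flags, plus an aligned scratch buffer (ubuf) that temporarily
 * replaces the caller's result buffer.
 */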
struct ahash_request_priv {
        crypto_completion_t complete;
        void *data;
        u8 *result;
        u32 flags;
        void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
        return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
                            halg);
}

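/*
 * Map the current scatterlist page and return how many bytes can be hashed
 * before the walk reaches the end of the page, the end of the entry, or
 * (for algorithms with an alignmask) the next aligned boundary.
 */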
static int hash_walk_next(struct crypto_hash_walk *walk)
{
        unsigned int alignmask = walk->alignmask;
        unsigned int offset = walk->offset;
        unsigned int nbytes = min(walk->entrylen,
                                  ((unsigned int)(PAGE_SIZE)) - offset);

        if (walk->flags & CRYPTO_ALG_ASYNC)
                walk->data = kmap(walk->pg);
        else
                walk->data = kmap_atomic(walk->pg);
        walk->data += offset;

        if (offset & alignmask) {
                unsigned int unaligned = alignmask + 1 - (offset & alignmask);

                if (nbytes > unaligned)
                        nbytes = unaligned;
        }

        walk->entrylen -= nbytes;
        return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
        struct scatterlist *sg;

        sg = walk->sg;
        walk->offset = sg->offset;
        walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
        walk->offset = offset_in_page(walk->offset);
        walk->entrylen = sg->length;

        if (walk->entrylen > walk->total)
                walk->entrylen = walk->total;
        walk->total -= walk->entrylen;

        return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
        unsigned int alignmask = walk->alignmask;

        walk->data -= walk->offset;

        if (walk->entrylen && (walk->offset & alignmask) && !err) {
                unsigned int nbytes;

                walk->offset = ALIGN(walk->offset, alignmask + 1);
                nbytes = min(walk->entrylen,
                             (unsigned int)(PAGE_SIZE - walk->offset));
                if (nbytes) {
                        walk->entrylen -= nbytes;
                        walk->data += walk->offset;
                        return nbytes;
                }
        }

        if (walk->flags & CRYPTO_ALG_ASYNC)
                kunmap(walk->pg);
        else {
                kunmap_atomic(walk->data);
                /*
                 * The MAY_SLEEP check in crypto_yield() only makes sense
                 * for sync users.  Async users don't need to sleep here
                 * anyway.
                 */
                crypto_yield(walk->flags);
        }

        if (err)
                return err;

        if (walk->entrylen) {
                walk->offset = 0;
                walk->pg++;
                return hash_walk_next(walk);
        }

        if (!walk->total)
                return 0;

        walk->sg = sg_next(walk->sg);

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
                           struct crypto_hash_walk *walk)
{
        walk->total = req->nbytes;

        if (!walk->total) {
                walk->entrylen = 0;
                return 0;
        }

        walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
        walk->sg = req->src;
        walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

int crypto_ahash_walk_first(struct ahash_request *req,
                            struct crypto_hash_walk *walk)
{
        walk->total = req->nbytes;

        if (!walk->total) {
                walk->entrylen = 0;
                return 0;
        }

        walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
        walk->sg = req->src;
        walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
        walk->flags |= CRYPTO_ALG_ASYNC;

        BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);
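/*
 * Typical use of the hash walk API, sketching the pattern used by callers
 * such as shash_ahash_update() ("desc" is assumed to be a caller-provided
 * struct shash_desc):
 *
 *      struct crypto_hash_walk walk;
 *      int nbytes;
 *
 *      for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *           nbytes = crypto_hash_walk_done(&walk, nbytes))
 *              nbytes = crypto_shash_update(desc, walk.data, nbytes);
 *
 * crypto_hash_walk_done() takes the result of processing the previous
 * chunk (zero or a negative error) and returns the size of the next mapped
 * chunk, zero once the walk is finished, or a negative error.
 */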

static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
                                unsigned int keylen)
{
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        int ret;
        u8 *buffer, *alignbuffer;
        unsigned long absize;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = tfm->setkey(tfm, alignbuffer, keylen);
        kzfree(buffer);
        return ret;
}

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
                          unsigned int keylen)
{
        return -ENOSYS;
}

static void ahash_set_needkey(struct crypto_ahash *tfm)
{
        const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);

        if (tfm->setkey != ahash_nosetkey &&
            !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
                crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
                        unsigned int keylen)
{
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        int err;

        if ((unsigned long)key & alignmask)
                err = ahash_setkey_unaligned(tfm, key, keylen);
        else
                err = tfm->setkey(tfm, key, keylen);

        if (unlikely(err)) {
                ahash_set_needkey(tfm);
                return err;
        }

        crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
        return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static inline unsigned int ahash_align_buffer_size(unsigned len,
                                                   unsigned long mask)
{
        return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        unsigned int ds = crypto_ahash_digestsize(tfm);
        struct ahash_request_priv *priv;

        priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
                       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC);
        if (!priv)
                return -ENOMEM;

        /*
         * WARNING: Voodoo programming below!
         *
         * The code below is obscure and hard to understand, thus explanation
         * is necessary. See include/crypto/hash.h and include/linux/crypto.h
         * to understand the layout of structures used here!
         *
         * The code here will replace portions of the ORIGINAL request with
         * pointers to new code and buffers so the hashing operation can store
         * the result in an aligned buffer. We will call the modified request
         * an ADJUSTED request.
         *
         * The newly mangled request will look as such:
         *
         * req {
         *   .result        = ADJUSTED[new aligned buffer]
         *   .base.complete = ADJUSTED[pointer to completion function]
         *   .base.data     = ADJUSTED[*req (pointer to self)]
         *   .priv          = ADJUSTED[new priv] {
         *           .result   = ORIGINAL(result)
         *           .complete = ORIGINAL(base.complete)
         *           .data     = ORIGINAL(base.data)
         *   }
         * }
         */

        priv->result = req->result;
        priv->complete = req->base.complete;
        priv->data = req->base.data;
        priv->flags = req->base.flags;

        /*
         * WARNING: We do not backup req->priv here! The req->priv
         *          is for internal use of the Crypto API and the
         *          user must _NOT_ _EVER_ depend on its content!
         */

        req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
        req->base.complete = cplt;
        req->base.data = req;
        req->priv = priv;

        return 0;
}

static void ahash_restore_req(struct ahash_request *req, int err)
{
        struct ahash_request_priv *priv = req->priv;

        if (!err)
                memcpy(priv->result, req->result,
                       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

        /* Restore the original crypto request. */
        req->result = priv->result;

        ahash_request_set_callback(req, priv->flags,
                                   priv->complete, priv->data);
        req->priv = NULL;

        /* Free the req->priv.priv from the ADJUSTED request. */
        kzfree(priv);
}

static void ahash_notify_einprogress(struct ahash_request *req)
{
        struct ahash_request_priv *priv = req->priv;
        struct crypto_async_request oreq;

        oreq.data = priv->data;

        priv->complete(&oreq, -EINPROGRESS);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;

        if (err == -EINPROGRESS) {
                ahash_notify_einprogress(areq);
                return;
        }

        /*
         * Restore the original request, see ahash_op_unaligned() for what
         * goes where.
         *
         * The "struct ahash_request *req" here is in fact the "req.base"
         * from the ADJUSTED request from ahash_op_unaligned(), thus as it
         * is a pointer to self, it is also the ADJUSTED "req".
         */

        /* First copy req->result into req->priv.result */
        ahash_restore_req(areq, err);

        /* Complete the ORIGINAL request. */
        areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
                              int (*op)(struct ahash_request *))
{
        int err;

        err = ahash_save_req(req, ahash_op_unaligned_done);
        if (err)
                return err;

        err = op(req);
        if (err == -EINPROGRESS || err == -EBUSY)
                return err;

        ahash_restore_req(req, err);

        return err;
}

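/*
 * Run one ahash operation, taking the unaligned slow path above when the
 * caller's result buffer does not satisfy the algorithm's alignmask.
 */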
static int crypto_ahash_op(struct ahash_request *req,
                           int (*op)(struct ahash_request *))
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        unsigned long alignmask = crypto_ahash_alignmask(tfm);

        if ((unsigned long)req->result & alignmask)
                return ahash_op_unaligned(req, op);

        return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct crypto_alg *alg = tfm->base.__crt_alg;
        unsigned int nbytes = req->nbytes;
        int ret;

        crypto_stats_get(alg);
        ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
        crypto_stats_ahash_final(nbytes, ret, alg);
        return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct crypto_alg *alg = tfm->base.__crt_alg;
        unsigned int nbytes = req->nbytes;
        int ret;

        crypto_stats_get(alg);
        ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
        crypto_stats_ahash_final(nbytes, ret, alg);
        return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct crypto_alg *alg = tfm->base.__crt_alg;
        unsigned int nbytes = req->nbytes;
        int ret;

        crypto_stats_get(alg);
        if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
                ret = -ENOKEY;
        else
                ret = crypto_ahash_op(req, tfm->digest);
        crypto_stats_ahash_final(nbytes, ret, alg);
        return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
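/*
 * A minimal sketch of how a caller might compute a digest over a linear
 * buffer with this API.  The "sha256" name and the data/len variables are
 * illustrative assumptions, and error handling (IS_ERR/NULL checks) is
 * omitted for brevity:
 *
 *      struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *      struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *      DECLARE_CRYPTO_WAIT(wait);
 *      struct scatterlist sg;
 *      u8 digest[32];
 *      int err;
 *
 *      sg_init_one(&sg, data, len);
 *      ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *                                 CRYPTO_TFM_REQ_MAY_SLEEP,
 *                                 crypto_req_done, &wait);
 *      ahash_request_set_crypt(req, &sg, digest, len);
 *      err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *
 *      ahash_request_free(req);
 *      crypto_free_ahash(tfm);
 */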

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;

        if (err == -EINPROGRESS)
                return;

        ahash_restore_req(areq, err);

        areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
        if (err)
                goto out;

        req->base.complete = ahash_def_finup_done2;

        err = crypto_ahash_reqtfm(req)->final(req);
        if (err == -EINPROGRESS || err == -EBUSY)
                return err;

out:
        ahash_restore_req(req, err);
        return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;

        if (err == -EINPROGRESS) {
                ahash_notify_einprogress(areq);
                return;
        }

        areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        err = ahash_def_finup_finish1(areq, err);
        if (areq->priv)
                return;

        areq->base.complete(&areq->base, err);
}

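/*
 * Default ->finup() for drivers that only provide ->update() and ->final():
 * chain the two calls through the saved/adjusted request so the sequence
 * also works when either step completes asynchronously.
 */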
static int ahash_def_finup(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        int err;

        err = ahash_save_req(req, ahash_def_finup_done1);
        if (err)
                return err;

        err = tfm->update(req);
        if (err == -EINPROGRESS || err == -EBUSY)
                return err;

        return ahash_def_finup_finish1(req, err);
}

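/*
 * Set up the per-transform operations when an ahash tfm is instantiated.
 * Algorithms that are really shash algorithms are handled by the async
 * shash wrapper instead of the ops below.
 */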
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
        struct ahash_alg *alg = crypto_ahash_alg(hash);

        hash->setkey = ahash_nosetkey;

        if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
                return crypto_init_shash_ops_async(tfm);

        hash->init = alg->init;
        hash->update = alg->update;
        hash->final = alg->final;
        hash->finup = alg->finup ?: ahash_def_finup;
        hash->digest = alg->digest;
        hash->export = alg->export;
        hash->import = alg->import;

        if (alg->setkey) {
                hash->setkey = alg->setkey;
                ahash_set_needkey(hash);
        }

        return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
        if (alg->cra_type != &crypto_ahash_type)
                return sizeof(struct crypto_shash *);

        return crypto_alg_extsize(alg);
}

static void crypto_ahash_free_instance(struct crypto_instance *inst)
{
        struct ahash_instance *ahash = ahash_instance(inst);

        ahash->free(ahash);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_hash rhash;

        memset(&rhash, 0, sizeof(rhash));

        strscpy(rhash.type, "ahash", sizeof(rhash.type));

        rhash.blocksize = alg->cra_blocksize;
        rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

        return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
        __maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
        seq_printf(m, "type         : ahash\n");
        seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                                             "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "digestsize   : %u\n",
                   __crypto_hash_alg_common(alg)->digestsize);
}

static const struct crypto_type crypto_ahash_type = {
        .extsize = crypto_ahash_extsize,
        .init_tfm = crypto_ahash_init_tfm,
        .free = crypto_ahash_free_instance,
#ifdef CONFIG_PROC_FS
        .show = crypto_ahash_show,
#endif
        .report = crypto_ahash_report,
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
        .type = CRYPTO_ALG_TYPE_AHASH,
        .tfmsize = offsetof(struct crypto_ahash, base),
};

int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
                      struct crypto_instance *inst,
                      const char *name, u32 type, u32 mask)
{
        spawn->base.frontend = &crypto_ahash_type;
        return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_ahash);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
                                        u32 mask)
{
        return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
        return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
        struct crypto_alg *base = &alg->halg.base;

        if (alg->halg.digestsize > HASH_MAX_DIGESTSIZE ||
            alg->halg.statesize > HASH_MAX_STATESIZE ||
            alg->halg.statesize == 0)
                return -EINVAL;

        base->cra_type = &crypto_ahash_type;
        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
        base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

        return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
        struct crypto_alg *base = &alg->halg.base;
        int err;

        err = ahash_prepare_alg(alg);
        if (err)
                return err;

        return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);
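/*
 * A rough sketch of a driver-side registration.  All names and values here
 * are illustrative assumptions, not something this file defines; a real
 * async driver would also set CRYPTO_ALG_ASYNC in cra_flags and usually
 * provide ->export()/->import():
 *
 *      static struct ahash_alg example_alg = {
 *              .init   = example_init,
 *              .update = example_update,
 *              .final  = example_final,
 *              .digest = example_digest,
 *              .halg   = {
 *                      .digestsize = 32,
 *                      .statesize  = sizeof(struct example_state),
 *                      .base = {
 *                              .cra_name        = "sha256",
 *                              .cra_driver_name = "sha256-example",
 *                              .cra_priority    = 300,
 *                              .cra_blocksize   = 64,
 *                              .cra_ctxsize     = sizeof(struct example_ctx),
 *                              .cra_module      = THIS_MODULE,
 *                      },
 *              },
 *      };
 *
 *      err = crypto_register_ahash(&example_alg);
 */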

void crypto_unregister_ahash(struct ahash_alg *alg)
{
        crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int crypto_register_ahashes(struct ahash_alg *algs, int count)
{
        int i, ret;

        for (i = 0; i < count; i++) {
                ret = crypto_register_ahash(&algs[i]);
                if (ret)
                        goto err;
        }

        return 0;

err:
        for (--i; i >= 0; --i)
                crypto_unregister_ahash(&algs[i]);

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_ahashes);

void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
{
        int i;

        for (i = count - 1; i >= 0; --i)
                crypto_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);

int ahash_register_instance(struct crypto_template *tmpl,
                            struct ahash_instance *inst)
{
        int err;

        if (WARN_ON(!inst->free))
                return -EINVAL;

        err = ahash_prepare_alg(&inst->alg);
        if (err)
                return err;

        return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
        struct crypto_alg *alg = &halg->base;

        if (alg->cra_type != &crypto_ahash_type)
                return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));

        return __crypto_ahash_alg(alg)->setkey != NULL;
}
EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");