// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks. In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
#include <net/netlink.h>

#include "internal.h"

enum {
	SKCIPHER_WALK_PHYS = 1 << 0,
	SKCIPHER_WALK_SLOW = 1 << 1,
	SKCIPHER_WALK_COPY = 1 << 2,
	SKCIPHER_WALK_DIFF = 1 << 3,
	SKCIPHER_WALK_SLEEP = 1 << 4,
};

struct skcipher_walk_buffer {
	struct list_head entry;
	struct scatter_walk dst;
	unsigned int len;
	u8 *data;
	u8 buffer[];
};

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
{
	if (PageHighMem(scatterwalk_page(walk)))
		kunmap_atomic(vaddr);
}

static inline void *skcipher_map(struct scatter_walk *walk)
{
	struct page *page = scatterwalk_page(walk);

	return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
	       offset_in_page(walk->offset);
}

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
	walk->src.virt.addr = skcipher_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
	walk->dst.virt.addr = skcipher_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->in, walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->out, walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}
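
/*
 * Worked example (illustrative, not from the original source): with 4 KiB
 * pages, a start at page offset 0xff8 with len == 16 would straddle the
 * page boundary, so end_page is the base of the following page and is
 * returned; when start + len - 1 stays within start's page, end_page is
 * at or below start and start itself is returned unchanged.
 */
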
static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = skcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize,
			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
	return 0;
}

int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
	unsigned int n = walk->nbytes;
	unsigned int nbytes = 0;

	if (!n)
		goto finish;

	if (likely(err >= 0)) {
		n -= err;
		nbytes = walk->total - n;
	}

	if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
				    SKCIPHER_WALK_SLOW |
				    SKCIPHER_WALK_COPY |
				    SKCIPHER_WALK_DIFF)))) {
unmap_src:
		skcipher_unmap_src(walk);
	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
		skcipher_unmap_dst(walk);
		goto unmap_src;
	} else if (walk->flags & SKCIPHER_WALK_COPY) {
		skcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		skcipher_unmap_dst(walk);
	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
		if (err > 0) {
			/*
			 * Didn't process all bytes.  Either the algorithm is
			 * broken, or this was the last step and it turned out
			 * the message wasn't evenly divisible into blocks but
			 * the algorithm requires it.
			 */
			err = -EINVAL;
			nbytes = 0;
		} else
			n = skcipher_done_slow(walk, n);
	}

	if (err > 0)
		err = 0;

	walk->total = nbytes;
	walk->nbytes = 0;

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	if (nbytes) {
		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
		return skcipher_walk_next(walk);
	}

finish:
	/* Short-circuit for the common/fast path. */
	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
		goto out;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		goto out;

	if (walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

out:
	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);

void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
	struct skcipher_walk_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		u8 *data;

		if (err)
			goto done;

		data = p->data;
		if (!data) {
			data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
			data = skcipher_get_spot(data, walk->stride);
		}

		scatterwalk_copychunks(data, &p->dst, p->len, 1);

		if (offset_in_page(p->data) + p->len + walk->stride >
		    PAGE_SIZE)
			free_page((unsigned long)p->data);

done:
		list_del(&p->entry);
		kfree(p);
	}

	if (!err && walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);

static void skcipher_queue_write(struct skcipher_walk *walk,
				 struct skcipher_walk_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	bool phys = walk->flags & SKCIPHER_WALK_PHYS;
	unsigned alignmask = walk->alignmask;
	struct skcipher_walk_buffer *p;
	unsigned a;
	unsigned n;
	u8 *buffer;
	void *v;

	if (!phys) {
		if (!walk->buffer)
			walk->buffer = walk->page;
		buffer = walk->buffer;
		if (buffer)
			goto ok;
	}

	/* Start with the minimum alignment of kmalloc. */
	a = crypto_tfm_ctx_alignment() - 1;
	n = bsize;

	if (phys) {
		/* Calculate the minimum alignment of p->buffer. */
		a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
		n += sizeof(*p);
	}

	/* Minimum size to align p->buffer by alignmask. */
	n += alignmask & ~a;

	/* Minimum size to ensure p->buffer does not straddle a page. */
	n += (bsize - 1) & ~(alignmask | a);

	v = kzalloc(n, skcipher_walk_gfp(walk));
	if (!v)
		return skcipher_walk_done(walk, -ENOMEM);

	if (phys) {
		p = v;
		p->len = bsize;
		skcipher_queue_write(walk, p);
		buffer = p->buffer;
	} else {
		walk->buffer = v;
		buffer = v;
	}

ok:
	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
	walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = walk->dst.virt.addr;

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= SKCIPHER_WALK_SLOW;

	return 0;
}

static int skcipher_next_copy(struct skcipher_walk *walk)
{
	struct skcipher_walk_buffer *p;
	u8 *tmp = walk->page;

	skcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	skcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	if (!(walk->flags & SKCIPHER_WALK_PHYS))
		return 0;

	p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
	if (!p)
		return -ENOMEM;

	p->data = walk->page;
	p->len = walk->nbytes;
	skcipher_queue_write(walk, p);

	if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
	    PAGE_SIZE)
		walk->page = NULL;
	else
		walk->page += walk->nbytes;

	return 0;
}

static int skcipher_next_fast(struct skcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & SKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	skcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= SKCIPHER_WALK_DIFF;
		skcipher_map_dst(walk);
	}

	return 0;
}

static int skcipher_walk_next(struct skcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
			 SKCIPHER_WALK_DIFF);

	n = walk->total;
	bsize = min(walk->stride, max(n, walk->blocksize));
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		if (unlikely(walk->total < walk->blocksize))
			return skcipher_walk_done(walk, -EINVAL);

slow_path:
		err = skcipher_next_slow(walk, bsize);
		goto set_phys_lowmem;
	}

	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
		if (!walk->page) {
			gfp_t gfp = skcipher_walk_gfp(walk);

			walk->page = (void *)__get_free_page(gfp);
			if (!walk->page)
				goto slow_path;
		}

		walk->nbytes = min_t(unsigned, n,
				     PAGE_SIZE - offset_in_page(walk->page));
		walk->flags |= SKCIPHER_WALK_COPY;
		err = skcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return skcipher_next_fast(walk);

set_phys_lowmem:
	if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

static int skcipher_copy_iv(struct skcipher_walk *walk)
{
	unsigned a = crypto_tfm_ctx_alignment() - 1;
	unsigned alignmask = walk->alignmask;
	unsigned ivsize = walk->ivsize;
	unsigned bs = walk->stride;
	unsigned aligned_bs;
	unsigned size;
	u8 *iv;

	aligned_bs = ALIGN(bs, alignmask + 1);

	/* Minimum size to align buffer by alignmask. */
	size = alignmask & ~a;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		size += ivsize;
	else {
		size += aligned_bs + ivsize;

		/* Minimum size to ensure buffer does not straddle a page. */
		size += (bs - 1) & ~(alignmask | a);
	}

	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
	if (!walk->buffer)
		return -ENOMEM;

	iv = PTR_ALIGN(walk->buffer, alignmask + 1);
	iv = skcipher_get_spot(iv, bs) + aligned_bs;

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

static int skcipher_walk_first(struct skcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = skcipher_copy_iv(walk);

		if (err)
			return err;
	}

	walk->page = NULL;

	return skcipher_walk_next(walk);
}

static int skcipher_walk_skcipher(struct skcipher_walk *walk,
				  struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

	walk->total = req->cryptlen;
	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	walk->flags &= ~SKCIPHER_WALK_SLEEP;
	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		       SKCIPHER_WALK_SLEEP : 0;

	walk->blocksize = crypto_skcipher_blocksize(tfm);
	walk->stride = crypto_skcipher_walksize(tfm);
	walk->ivsize = crypto_skcipher_ivsize(tfm);
	walk->alignmask = crypto_skcipher_alignmask(tfm);

	return skcipher_walk_first(walk);
}

int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req, bool atomic)
{
	int err;

	might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	err = skcipher_walk_skcipher(walk, req);

	walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
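
/*
 * Typical use by a cipher implementation (a minimal sketch; the function
 * and variable names below are placeholders, not part of this file):
 *
 *	static int example_crypt(struct skcipher_request *req)
 *	{
 *		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 *		unsigned int bs = crypto_skcipher_blocksize(tfm);
 *		struct skcipher_walk walk;
 *		unsigned int nbytes;
 *		int err;
 *
 *		err = skcipher_walk_virt(&walk, req, false);
 *		while ((nbytes = walk.nbytes) != 0) {
 *			// Process nbytes - (nbytes % bs) bytes from
 *			// walk.src.virt.addr into walk.dst.virt.addr, then
 *			// hand the unprocessed remainder back to the walker.
 *			err = skcipher_walk_done(&walk, nbytes % bs);
 *		}
 *		return err;
 *	}
 */
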
void skcipher_walk_atomise(struct skcipher_walk *walk)
{
	walk->flags &= ~SKCIPHER_WALK_SLEEP;
}
EXPORT_SYMBOL_GPL(skcipher_walk_atomise);

int skcipher_walk_async(struct skcipher_walk *walk,
			struct skcipher_request *req)
{
	walk->flags |= SKCIPHER_WALK_PHYS;

	INIT_LIST_HEAD(&walk->buffers);

	return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);

static int skcipher_walk_aead_common(struct skcipher_walk *walk,
				     struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int err;

	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

	scatterwalk_done(&walk->in, 0, walk->total);
	scatterwalk_done(&walk->out, 0, walk->total);

	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		walk->flags |= SKCIPHER_WALK_SLEEP;
	else
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	walk->blocksize = crypto_aead_blocksize(tfm);
	walk->stride = crypto_aead_chunksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);

	err = skcipher_walk_first(walk);

	if (atomic)
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	return err;
}

int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
		       bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead);

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	walk->total = req->cryptlen - crypto_aead_authsize(tfm);

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);

static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
	return crypto_alg_extsize(alg);
}

static void skcipher_set_needkey(struct crypto_skcipher *tfm)
{
	if (crypto_skcipher_max_keysize(tfm) != 0)
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	u8 *buffer, *alignbuffer;
	unsigned long absize;
	int ret;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	int err;

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		err = skcipher_setkey_unaligned(tfm, key, keylen);
	else
		err = cipher->setkey(tfm, key, keylen);

	if (unlikely(err)) {
		skcipher_set_needkey(tfm);
		return err;
	}

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}

int crypto_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int cryptlen = req->cryptlen;
	int ret;

	crypto_stats_get(alg);
	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = tfm->encrypt(req);
	crypto_stats_skcipher_encrypt(cryptlen, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);

int crypto_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int cryptlen = req->cryptlen;
	int ret;

	crypto_stats_get(alg);
	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = tfm->decrypt(req);
	crypto_stats_skcipher_decrypt(cryptlen, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);
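
/*
 * Caller-side sketch (illustrative only; names and error handling are
 * placeholders, following the pattern in Documentation/crypto/api-samples.rst):
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	struct skcipher_request *req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	crypto_skcipher_setkey(tfm, key, keylen);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, datalen, iv);
 *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 */
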
static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	skcipher->setkey = skcipher_setkey;
	skcipher->encrypt = alg->encrypt;
	skcipher->decrypt = alg->decrypt;

	skcipher_set_needkey(skcipher);

	if (alg->exit)
		skcipher->base.exit = crypto_skcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
	struct skcipher_instance *skcipher =
		container_of(inst, struct skcipher_instance, s.base);

	skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	seq_printf(m, "type         : skcipher\n");
	seq_printf(m, "async        : %s\n",
		   alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
}

#ifdef CONFIG_NET
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->min_keysize;
	rblkcipher.max_keysize = skcipher->max_keysize;
	rblkcipher.ivsize = skcipher->ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}
#else
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static const struct crypto_type crypto_skcipher_type = {
	.extsize = crypto_skcipher_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_skcipher_show,
#endif
	.report = crypto_skcipher_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			 const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_skcipher_type;
	return crypto_grab_spawn(&spawn->base, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);

struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
				const char *alg_name, u32 type, u32 mask)
{
	struct crypto_skcipher *tfm;

	/* Only sync algorithms allowed. */
	mask |= CRYPTO_ALG_ASYNC;

	tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);

	/*
	 * Make sure we do not allocate something that might get used with
	 * an on-stack request: check the request size.
	 */
	if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
				    MAX_SYNC_SKCIPHER_REQSIZE)) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	return (struct crypto_sync_skcipher *)tfm;
}
EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);
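
/*
 * The sync variant pairs with an on-stack request; a minimal sketch
 * (placeholder names, error handling elided):
 *
 *	struct crypto_sync_skcipher *tfm =
 *		crypto_alloc_sync_skcipher("ctr(aes)", 0, 0);
 *	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
 *
 *	skcipher_request_set_sync_tfm(req, tfm);
 *	skcipher_request_set_callback(req, 0, NULL, NULL);
 *	skcipher_request_set_crypt(req, sg, sg, len, iv);
 *	err = crypto_skcipher_encrypt(req);
 *	skcipher_request_zero(req);
 */
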
int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher);

static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;

	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
	    alg->walksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->chunksize)
		alg->chunksize = base->cra_blocksize;
	if (!alg->walksize)
		alg->walksize = alg->chunksize;

	base->cra_type = &crypto_skcipher_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

	return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);
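
/*
 * Drivers typically register an array of algorithms from module init and
 * unwind it on exit; a hedged sketch (my_algs is a placeholder array of
 * struct skcipher_alg, not defined in this file):
 *
 *	static int __init example_init(void)
 *	{
 *		return crypto_register_skciphers(my_algs,
 *						 ARRAY_SIZE(my_algs));
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		crypto_unregister_skciphers(my_algs, ARRAY_SIZE(my_algs));
 *	}
 */
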
int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst)
{
	int err;

	err = skcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key,
				  unsigned int keylen)
{
	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
	int err;

	crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) &
				CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(cipher, key, keylen);
	crypto_skcipher_set_flags(tfm, crypto_cipher_get_flags(cipher) &
				  CRYPTO_TFM_RES_MASK);
	return err;
}

static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->cipher = cipher;
	return 0;
}

static void skcipher_exit_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_cipher(ctx->cipher);
}

static void skcipher_free_instance_simple(struct skcipher_instance *inst)
{
	crypto_drop_spawn(skcipher_instance_ctx(inst));
	kfree(inst);
}

/**
 * skcipher_alloc_instance_simple - allocate instance of simple block cipher mode
 *
 * Allocate an skcipher_instance for a simple block cipher mode of operation,
 * e.g. cbc or ecb.  The instance context will have just a single crypto_spawn,
 * that for the underlying cipher.  The {min,max}_keysize, ivsize, blocksize,
 * alignmask, and priority are set from the underlying cipher but can be
 * overridden if needed.  The tfm context defaults to skcipher_ctx_simple, and
 * default ->setkey(), ->init(), and ->exit() methods are installed.
 *
 * @tmpl: the template being instantiated
 * @tb: the template parameters
 * @cipher_alg_ret: on success, a pointer to the underlying cipher algorithm is
 *		    returned here.  It must be dropped with crypto_mod_put().
 *
 * Return: a pointer to the new instance, or an ERR_PTR().  The caller still
 *	   needs to register the instance.
 */
struct skcipher_instance *
skcipher_alloc_instance_simple(struct crypto_template *tmpl, struct rtattr **tb,
			       struct crypto_alg **cipher_alg_ret)
{
	struct crypto_attr_type *algt;
	struct crypto_alg *cipher_alg;
	struct skcipher_instance *inst;
	struct crypto_spawn *spawn;
	u32 mask;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_CAST(algt);

	if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
		return ERR_PTR(-EINVAL);

	mask = CRYPTO_ALG_TYPE_MASK |
		crypto_requires_off(algt->type, algt->mask,
				    CRYPTO_ALG_NEED_FALLBACK);

	cipher_alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask);
	if (IS_ERR(cipher_alg))
		return ERR_CAST(cipher_alg);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst) {
		err = -ENOMEM;
		goto err_put_cipher_alg;
	}
	spawn = skcipher_instance_ctx(inst);

	err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name,
				  cipher_alg);
	if (err)
		goto err_free_inst;

	err = crypto_init_spawn(spawn, cipher_alg,
				skcipher_crypto_instance(inst),
				CRYPTO_ALG_TYPE_MASK);
	if (err)
		goto err_free_inst;
	inst->free = skcipher_free_instance_simple;

	/* Default algorithm properties, can be overridden */
	inst->alg.base.cra_blocksize = cipher_alg->cra_blocksize;
	inst->alg.base.cra_alignmask = cipher_alg->cra_alignmask;
	inst->alg.base.cra_priority = cipher_alg->cra_priority;
	inst->alg.min_keysize = cipher_alg->cra_cipher.cia_min_keysize;
	inst->alg.max_keysize = cipher_alg->cra_cipher.cia_max_keysize;
	inst->alg.ivsize = cipher_alg->cra_blocksize;

	/* Use skcipher_ctx_simple by default, can be overridden */
	inst->alg.base.cra_ctxsize = sizeof(struct skcipher_ctx_simple);
	inst->alg.setkey = skcipher_setkey_simple;
	inst->alg.init = skcipher_init_tfm_simple;
	inst->alg.exit = skcipher_exit_tfm_simple;

	*cipher_alg_ret = cipher_alg;
	return inst;

err_free_inst:
	kfree(inst);
err_put_cipher_alg:
	crypto_mod_put(cipher_alg);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);
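
/*
 * A template's ->create() hook can use the helper the way crypto/ecb.c
 * does; sketch with placeholder encrypt/decrypt handlers:
 *
 *	static int example_create(struct crypto_template *tmpl,
 *				  struct rtattr **tb)
 *	{
 *		struct skcipher_instance *inst;
 *		struct crypto_alg *alg;
 *		int err;
 *
 *		inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
 *		if (IS_ERR(inst))
 *			return PTR_ERR(inst);
 *
 *		inst->alg.ivsize = 0;	// e.g. ecb takes no IV
 *		inst->alg.encrypt = example_encrypt;
 *		inst->alg.decrypt = example_decrypt;
 *
 *		err = skcipher_register_instance(tmpl, inst);
 *		if (err)
 *			inst->free(inst);
 *		crypto_mod_put(alg);
 *		return err;
 *	}
 */
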
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");