crypto: x86/glue-helper - drop CTR helper routines
arch/x86/crypto/glue_helper.c (linux-2.6-microblaze.git)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Shared glue code for 128-bit block ciphers
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * CBC & ECB parts based on code (crypto/cbc.c, ecb.c) by:
 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <linux/module.h>
#include <crypto/b128ops.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <asm/crypto/glue_helper.h>

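/*
 * glue_ecb_req_128bit - ECB helper for 128-bit block ciphers
 *
 * Walk the request and, for each chunk, dispatch to the widest batch
 * routine that still fits (gctx->funcs[] is expected to be ordered from
 * the largest num_blocks down to 1), falling back to narrower routines
 * for the remaining tail blocks.
 */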
int glue_ecb_req_128bit(const struct common_glue_ctx *gctx,
                        struct skcipher_request *req)
{
        void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
        const unsigned int bsize = 128 / 8;
        struct skcipher_walk walk;
        bool fpu_enabled = false;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while ((nbytes = walk.nbytes)) {
                const u8 *src = walk.src.virt.addr;
                u8 *dst = walk.dst.virt.addr;
                unsigned int func_bytes;
                unsigned int i;

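                /*
                 * Entering FPU context is expensive, so do it only when
                 * at least fpu_blocks_limit blocks are pending, and keep
                 * it enabled across walk steps until glue_fpu_end().
                 */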
                fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
                                             &walk, fpu_enabled, nbytes);
                for (i = 0; i < gctx->num_funcs; i++) {
                        func_bytes = bsize * gctx->funcs[i].num_blocks;

                        if (nbytes < func_bytes)
                                continue;

                        /* Process multi-block batch */
                        do {
                                gctx->funcs[i].fn_u.ecb(ctx, dst, src);
                                src += func_bytes;
                                dst += func_bytes;
                                nbytes -= func_bytes;
                        } while (nbytes >= func_bytes);

                        if (nbytes < bsize)
                                break;
                }
                err = skcipher_walk_done(&walk, nbytes);
        }

        glue_fpu_end(fpu_enabled);
        return err;
}
EXPORT_SYMBOL_GPL(glue_ecb_req_128bit);

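/*
 * glue_cbc_encrypt_req_128bit - CBC encryption helper
 *
 * CBC encryption cannot be parallelized: each block's input depends on
 * the previous ciphertext block. Blocks are therefore handled one at a
 * time with a single-block encrypt function, and no FPU batching is done.
 */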
int glue_cbc_encrypt_req_128bit(const common_glue_func_t fn,
                                struct skcipher_request *req)
{
        void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
        const unsigned int bsize = 128 / 8;
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while ((nbytes = walk.nbytes)) {
                const u128 *src = (u128 *)walk.src.virt.addr;
                u128 *dst = (u128 *)walk.dst.virt.addr;
                u128 *iv = (u128 *)walk.iv;

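                /* C[i] = E(P[i] ^ C[i-1]), where C[-1] is the IV */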
                do {
                        u128_xor(dst, src, iv);
                        fn(ctx, (u8 *)dst, (u8 *)dst);
                        iv = dst;
                        src++;
                        dst++;
                        nbytes -= bsize;
                } while (nbytes >= bsize);

                *(u128 *)walk.iv = *iv;
                err = skcipher_walk_done(&walk, nbytes);
        }
        return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_encrypt_req_128bit);

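/*
 * glue_cbc_decrypt_req_128bit - CBC decryption helper
 *
 * Decryption parallelizes, so multi-block batch routines can be used.
 * Each chunk is processed from its last block backwards: that way the
 * preceding ciphertext blocks needed for the chaining XORs are still
 * intact even when decrypting in place.
 */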
int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx,
                                struct skcipher_request *req)
{
        void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
        const unsigned int bsize = 128 / 8;
        struct skcipher_walk walk;
        bool fpu_enabled = false;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while ((nbytes = walk.nbytes)) {
                const u128 *src = walk.src.virt.addr;
                u128 *dst = walk.dst.virt.addr;
                unsigned int func_bytes, num_blocks;
                unsigned int i;
                u128 last_iv;

                fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
                                             &walk, fpu_enabled, nbytes);
                /* Start of the last block. */
                src += nbytes / bsize - 1;
                dst += nbytes / bsize - 1;

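                /* Save the last ciphertext block; it becomes the next IV. */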
                last_iv = *src;

                for (i = 0; i < gctx->num_funcs; i++) {
                        num_blocks = gctx->funcs[i].num_blocks;
                        func_bytes = bsize * num_blocks;

                        if (nbytes < func_bytes)
                                continue;

                        /* Process multi-block batch */
                        do {
                                src -= num_blocks - 1;
                                dst -= num_blocks - 1;

                                gctx->funcs[i].fn_u.cbc(ctx, (u8 *)dst,
                                                        (const u8 *)src);

                                nbytes -= func_bytes;
                                if (nbytes < bsize)
                                        goto done;

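                                /*
                                 * The batch routine chains blocks within
                                 * the batch itself; XOR the batch's first
                                 * block with the preceding ciphertext
                                 * block and step back to it.
                                 */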
                                u128_xor(dst, dst, --src);
                                dst--;
                        } while (nbytes >= func_bytes);
                }
done:
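                /*
                 * The chunk's first block chains to walk.iv; store the
                 * saved last ciphertext block as the next IV.
                 */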
                u128_xor(dst, dst, (u128 *)walk.iv);
                *(u128 *)walk.iv = last_iv;
                err = skcipher_walk_done(&walk, nbytes);
        }

        glue_fpu_end(fpu_enabled);
        return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_decrypt_req_128bit);

MODULE_LICENSE("GPL");