linux-2.6-microblaze.git: arch/x86/crypto/glue_helper.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Shared glue code for 128bit block ciphers
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 * CTR part based on code (crypto/ctr.c) by:
 *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 */

#include <linux/module.h>
#include <crypto/b128ops.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <asm/crypto/glue_helper.h>

int glue_ecb_req_128bit(const struct common_glue_ctx *gctx,
			struct skcipher_request *req)
{
	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	const unsigned int bsize = 128 / 8;
	struct skcipher_walk walk;
	bool fpu_enabled = false;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes)) {
		const u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;
		unsigned int func_bytes;
		unsigned int i;

		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     &walk, fpu_enabled, nbytes);
		/*
		 * funcs[] runs from the largest batch size down to a
		 * single block, so the widest function that still fits
		 * the remaining data is used first.
		 */
		for (i = 0; i < gctx->num_funcs; i++) {
			func_bytes = bsize * gctx->funcs[i].num_blocks;

			if (nbytes < func_bytes)
				continue;

			/* Process multi-block batch */
			do {
				gctx->funcs[i].fn_u.ecb(ctx, dst, src);
				src += func_bytes;
				dst += func_bytes;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				break;
		}
		err = skcipher_walk_done(&walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);
	return err;
}
EXPORT_SYMBOL_GPL(glue_ecb_req_128bit);

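/*
 * Usage sketch (illustrative, not part of this file): a cipher glue
 * module along the lines of arch/x86/crypto/serpent_avx_glue.c builds
 * a common_glue_ctx whose funcs[] entries run from the largest batch
 * down to a single block, then forwards its skcipher .encrypt/.decrypt
 * callbacks through this helper.  The serpent_* names below are
 * hypothetical stand-ins for an implementation's real functions:
 *
 *	static const struct common_glue_ctx serpent_enc = {
 *		.num_funcs = 2,
 *		.fpu_blocks_limit = 8,
 *
 *		.funcs = { {
 *			.num_blocks = 8,
 *			.fn_u = { .ecb = serpent_ecb_enc_8way }
 *		}, {
 *			.num_blocks = 1,
 *			.fn_u = { .ecb = serpent_encrypt_1blk }
 *		} }
 *	};
 *
 *	static int ecb_encrypt(struct skcipher_request *req)
 *	{
 *		return glue_ecb_req_128bit(&serpent_enc, req);
 *	}
 */
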
int glue_cbc_encrypt_req_128bit(const common_glue_func_t fn,
				struct skcipher_request *req)
{
	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	const unsigned int bsize = 128 / 8;
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes)) {
		const u128 *src = (u128 *)walk.src.virt.addr;
		u128 *dst = (u128 *)walk.dst.virt.addr;
		u128 *iv = (u128 *)walk.iv;

		/*
		 * CBC encryption is serial: each block is chained to
		 * the previous ciphertext block, so only a single-block
		 * function is usable and no FPU batching is done here.
		 */
		do {
			u128_xor(dst, src, iv);
			fn(ctx, (u8 *)dst, (u8 *)dst);
			iv = dst;
			src++;
			dst++;
			nbytes -= bsize;
		} while (nbytes >= bsize);

		*(u128 *)walk.iv = *iv;
		err = skcipher_walk_done(&walk, nbytes);
	}
	return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_encrypt_req_128bit);

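/*
 * Usage sketch (illustrative): since CBC encryption cannot be
 * parallelized, the caller passes a plain single-block encrypt
 * function with the common_glue_func_t shape seen above
 * (ctx, u8 *dst, const u8 *src) rather than a common_glue_ctx.
 * twofish_enc_blk is a hypothetical example of such a primitive:
 *
 *	static int cbc_encrypt(struct skcipher_request *req)
 *	{
 *		return glue_cbc_encrypt_req_128bit(twofish_enc_blk, req);
 *	}
 */
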
int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx,
				struct skcipher_request *req)
{
	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	const unsigned int bsize = 128 / 8;
	struct skcipher_walk walk;
	bool fpu_enabled = false;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes)) {
		const u128 *src = walk.src.virt.addr;
		u128 *dst = walk.dst.virt.addr;
		unsigned int func_bytes, num_blocks;
		unsigned int i;
		u128 last_iv;

		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     &walk, fpu_enabled, nbytes);
		/* Start of the last block. */
		src += nbytes / bsize - 1;
		dst += nbytes / bsize - 1;

		/*
		 * The segment is processed back to front so decryption
		 * works in place: the last ciphertext block is saved
		 * first, since it becomes the IV for the next segment.
		 */
		last_iv = *src;

		for (i = 0; i < gctx->num_funcs; i++) {
			num_blocks = gctx->funcs[i].num_blocks;
			func_bytes = bsize * num_blocks;

			if (nbytes < func_bytes)
				continue;

			/* Process multi-block batch */
			do {
				src -= num_blocks - 1;
				dst -= num_blocks - 1;

				gctx->funcs[i].fn_u.cbc(ctx, (u8 *)dst,
							(const u8 *)src);

				nbytes -= func_bytes;
				if (nbytes < bsize)
					goto done;

				/*
				 * XOR the batch's first block with the
				 * preceding ciphertext block; the N-way
				 * function does the chaining XORs for
				 * the rest of the batch itself.
				 */
				u128_xor(dst, dst, --src);
				dst--;
			} while (nbytes >= func_bytes);
		}
done:
		u128_xor(dst, dst, (u128 *)walk.iv);
		*(u128 *)walk.iv = last_iv;
		err = skcipher_walk_done(&walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);
	return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_decrypt_req_128bit);

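/*
 * Usage sketch (illustrative): decryption does parallelize, so the
 * common_glue_ctx carries N-way fn_u.cbc routines.  An N-way routine
 * is expected to decrypt N consecutive blocks and perform the chaining
 * XORs for blocks 2..N itself; the helper above only XORs each batch's
 * first block with the preceding ciphertext (or the IV).  Hypothetical
 * names again:
 *
 *	static const struct common_glue_ctx serpent_dec_cbc = {
 *		.num_funcs = 2,
 *		.fpu_blocks_limit = 8,
 *
 *		.funcs = { {
 *			.num_blocks = 8,
 *			.fn_u = { .cbc = serpent_cbc_dec_8way }
 *		}, {
 *			.num_blocks = 1,
 *			.fn_u = { .cbc = serpent_decrypt_1blk }
 *		} }
 *	};
 *
 *	static int cbc_decrypt(struct skcipher_request *req)
 *	{
 *		return glue_cbc_decrypt_req_128bit(&serpent_dec_cbc, req);
 *	}
 */
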
int glue_ctr_req_128bit(const struct common_glue_ctx *gctx,
			struct skcipher_request *req)
{
	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	const unsigned int bsize = 128 / 8;
	struct skcipher_walk walk;
	bool fpu_enabled = false;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) >= bsize) {
		const u128 *src = walk.src.virt.addr;
		u128 *dst = walk.dst.virt.addr;
		unsigned int func_bytes, num_blocks;
		unsigned int i;
		le128 ctrblk;

		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     &walk, fpu_enabled, nbytes);

		/*
		 * The counter is kept in little-endian form while the
		 * batch functions increment it, and converted back to
		 * the big-endian on-wire format below.
		 */
		be128_to_le128(&ctrblk, (be128 *)walk.iv);

		for (i = 0; i < gctx->num_funcs; i++) {
			num_blocks = gctx->funcs[i].num_blocks;
			func_bytes = bsize * num_blocks;

			if (nbytes < func_bytes)
				continue;

			/* Process multi-block batch */
			do {
				gctx->funcs[i].fn_u.ctr(ctx, (u8 *)dst,
							(const u8 *)src,
							&ctrblk);
				src += num_blocks;
				dst += num_blocks;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				break;
		}

		le128_to_be128((be128 *)walk.iv, &ctrblk);
		err = skcipher_walk_done(&walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);

	/*
	 * Handle a trailing partial block: run the single-block (last)
	 * funcs[] entry on a bounce buffer and copy only nbytes of the
	 * keystream-XORed result back out.
	 */
	if (nbytes) {
		le128 ctrblk;
		u128 tmp;

		be128_to_le128(&ctrblk, (be128 *)walk.iv);
		memcpy(&tmp, walk.src.virt.addr, nbytes);
		gctx->funcs[gctx->num_funcs - 1].fn_u.ctr(ctx, (u8 *)&tmp,
							  (const u8 *)&tmp,
							  &ctrblk);
		memcpy(walk.dst.virt.addr, &tmp, nbytes);
		le128_to_be128((be128 *)walk.iv, &ctrblk);

		err = skcipher_walk_done(&walk, 0);
	}

	return err;
}
EXPORT_SYMBOL_GPL(glue_ctr_req_128bit);

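/*
 * Usage sketch (illustrative): fn_u.ctr routines take the counter as a
 * le128 and must advance it by the number of blocks they process.  The
 * last funcs[] entry has to handle a single block, since it is reused
 * above for a trailing partial block.  Hypothetical names:
 *
 *	static const struct common_glue_ctx serpent_ctr = {
 *		.num_funcs = 2,
 *		.fpu_blocks_limit = 8,
 *
 *		.funcs = { {
 *			.num_blocks = 8,
 *			.fn_u = { .ctr = serpent_ctr_8way }
 *		}, {
 *			.num_blocks = 1,
 *			.fn_u = { .ctr = serpent_crypt_ctr_1blk }
 *		} }
 *	};
 *
 *	static int ctr_crypt(struct skcipher_request *req)
 *	{
 *		return glue_ctr_req_128bit(&serpent_ctr, req);
 *	}
 */
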
MODULE_LICENSE("GPL");