// arch/arm/crypto/aes-neonbs-glue.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Bit sliced AES using NEON instructions
 *
 * Copyright (C) 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
#include <linux/module.h>

MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");

MODULE_ALIAS_CRYPTO("ecb(aes)");
MODULE_ALIAS_CRYPTO("cbc(aes)");
MODULE_ALIAS_CRYPTO("ctr(aes)");
MODULE_ALIAS_CRYPTO("xts(aes)");

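/*
 * Core transforms, implemented in assembly (aes-neonbs-core.S). Each
 * call processes 'blocks' full AES blocks using the bit-sliced NEON
 * representation of the round keys produced by aesbs_convert_key().
 */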
asmlinkage void aesbs_convert_key(u8 out[], u32 const rk[], int rounds);

asmlinkage void aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
                                  int rounds, int blocks);
asmlinkage void aesbs_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[],
                                  int rounds, int blocks);

asmlinkage void aesbs_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
                                  int rounds, int blocks, u8 iv[]);

asmlinkage void aesbs_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
                                  int rounds, int blocks, u8 ctr[], u8 final[]);

asmlinkage void aesbs_xts_encrypt(u8 out[], u8 const in[], u8 const rk[],
                                  int rounds, int blocks, u8 iv[], int);
asmlinkage void aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[],
                                  int rounds, int blocks, u8 iv[], int);

struct aesbs_ctx {
        int     rounds;
        u8      rk[13 * (8 * AES_BLOCK_SIZE) + 32] __aligned(AES_BLOCK_SIZE);
};

struct aesbs_cbc_ctx {
        struct aesbs_ctx        key;
        struct crypto_skcipher  *enc_tfm;
};

struct aesbs_xts_ctx {
        struct aesbs_ctx        key;
        struct crypto_cipher    *cts_tfm;
        struct crypto_cipher    *tweak_tfm;
};

struct aesbs_ctr_ctx {
        struct aesbs_ctx        key;            /* must be first member */
        struct crypto_aes_ctx   fallback;
};

static int aesbs_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
                        unsigned int key_len)
{
        struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_aes_ctx rk;
        int err;

        err = aes_expandkey(&rk, in_key, key_len);
        if (err)
                return err;

        ctx->rounds = 6 + key_len / 4;

        kernel_neon_begin();
        aesbs_convert_key(ctx->rk, rk.key_enc, ctx->rounds);
        kernel_neon_end();
        /* don't leave the expanded key schedule on the stack */
        memzero_explicit(&rk, sizeof(rk));

        return 0;
}

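/*
 * Walk the request and hand full blocks to the NEON routine. While
 * more data follows, the block count is rounded down to a multiple of
 * the walk stride (8 blocks) so the bit-sliced code always operates
 * on complete 8-block bundles; the remainder is carried over to the
 * next iteration by skcipher_walk_done().
 */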
static int __ecb_crypt(struct skcipher_request *req,
                       void (*fn)(u8 out[], u8 const in[], u8 const rk[],
                                  int rounds, int blocks))
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while (walk.nbytes >= AES_BLOCK_SIZE) {
                unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

                if (walk.nbytes < walk.total)
                        blocks = round_down(blocks,
                                            walk.stride / AES_BLOCK_SIZE);

                kernel_neon_begin();
                fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->rk,
                   ctx->rounds, blocks);
                kernel_neon_end();
                err = skcipher_walk_done(&walk,
                                         walk.nbytes - blocks * AES_BLOCK_SIZE);
        }

        return err;
}

static int ecb_encrypt(struct skcipher_request *req)
{
        return __ecb_crypt(req, aesbs_ecb_encrypt);
}

static int ecb_decrypt(struct skcipher_request *req)
{
        return __ecb_crypt(req, aesbs_ecb_decrypt);
}

static int aesbs_cbc_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
                            unsigned int key_len)
{
        struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_aes_ctx rk;
        int err;

        err = aes_expandkey(&rk, in_key, key_len);
        if (err)
                return err;

        ctx->key.rounds = 6 + key_len / 4;

        kernel_neon_begin();
        aesbs_convert_key(ctx->key.rk, rk.key_enc, ctx->key.rounds);
        kernel_neon_end();
        memzero_explicit(&rk, sizeof(rk));

        return crypto_skcipher_setkey(ctx->enc_tfm, in_key, key_len);
}

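/*
 * CBC encryption is inherently sequential (each block's input depends
 * on the previous ciphertext block), so it cannot use the 8-way
 * bit-sliced code. Encryption is simply forwarded to the synchronous
 * cbc(aes) fallback allocated in cbc_init(); only decryption, which
 * can process blocks in parallel, uses the NEON routine.
 */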
static int cbc_encrypt(struct skcipher_request *req)
{
        struct skcipher_request *subreq = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);

        skcipher_request_set_tfm(subreq, ctx->enc_tfm);
        skcipher_request_set_callback(subreq,
                                      skcipher_request_flags(req),
                                      NULL, NULL);
        skcipher_request_set_crypt(subreq, req->src, req->dst,
                                   req->cryptlen, req->iv);

        return crypto_skcipher_encrypt(subreq);
}

static int cbc_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while (walk.nbytes >= AES_BLOCK_SIZE) {
                unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

                if (walk.nbytes < walk.total)
                        blocks = round_down(blocks,
                                            walk.stride / AES_BLOCK_SIZE);

                kernel_neon_begin();
                aesbs_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                  ctx->key.rk, ctx->key.rounds, blocks,
                                  walk.iv);
                kernel_neon_end();
                err = skcipher_walk_done(&walk,
                                         walk.nbytes - blocks * AES_BLOCK_SIZE);
        }

        return err;
}

static int cbc_init(struct crypto_skcipher *tfm)
{
        struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
        unsigned int reqsize;

        ctx->enc_tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(ctx->enc_tfm))
                return PTR_ERR(ctx->enc_tfm);

        reqsize = sizeof(struct skcipher_request);
        reqsize += crypto_skcipher_reqsize(ctx->enc_tfm);
        crypto_skcipher_set_reqsize(tfm, reqsize);

        return 0;
}

static void cbc_exit(struct crypto_skcipher *tfm)
{
        struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);

        crypto_free_skcipher(ctx->enc_tfm);
}

static int aesbs_ctr_setkey_sync(struct crypto_skcipher *tfm, const u8 *in_key,
                                 unsigned int key_len)
{
        struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
        int err;

        err = aes_expandkey(&ctx->fallback, in_key, key_len);
        if (err)
                return err;

        ctx->key.rounds = 6 + key_len / 4;

        kernel_neon_begin();
        aesbs_convert_key(ctx->key.rk, ctx->fallback.key_enc, ctx->key.rounds);
        kernel_neon_end();

        return 0;
}

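/*
 * CTR mode: full blocks are handled 8 at a time by the NEON code. If
 * the request ends in a partial block and this is the final chunk of
 * the walk, a 'final' buffer is passed so the assembly emits one more
 * block of keystream, which is then XORed into the tail by hand via
 * crypto_xor_cpy().
 */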
static int ctr_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        u8 buf[AES_BLOCK_SIZE];
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while (walk.nbytes > 0) {
                unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
                u8 *final = (walk.total % AES_BLOCK_SIZE) ? buf : NULL;

                if (walk.nbytes < walk.total) {
                        blocks = round_down(blocks,
                                            walk.stride / AES_BLOCK_SIZE);
                        final = NULL;
                }

                kernel_neon_begin();
                aesbs_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                  ctx->rk, ctx->rounds, blocks, walk.iv, final);
                kernel_neon_end();

                if (final) {
                        u8 *dst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
                        u8 *src = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;

                        crypto_xor_cpy(dst, src, final,
                                       walk.total % AES_BLOCK_SIZE);

                        err = skcipher_walk_done(&walk, 0);
                        break;
                }
                err = skcipher_walk_done(&walk,
                                         walk.nbytes - blocks * AES_BLOCK_SIZE);
        }

        return err;
}

static void ctr_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
{
        struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
        unsigned long flags;

        /*
         * Temporarily disable interrupts to avoid races where
         * cachelines are evicted when the CPU is interrupted
         * to do something else.
         */
        local_irq_save(flags);
        aes_encrypt(&ctx->fallback, dst, src);
        local_irq_restore(flags);
}

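/*
 * Entry point for the synchronous ctr(aes) algorithm: take the NEON
 * fast path when the SIMD unit may be used in the current context,
 * and fall back to a generic CTR walk over the scalar AES block
 * cipher otherwise (e.g. when called from interrupt context).
 */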
static int ctr_encrypt_sync(struct skcipher_request *req)
{
        if (!crypto_simd_usable())
                return crypto_ctr_encrypt_walk(req, ctr_encrypt_one);

        return ctr_encrypt(req);
}

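/*
 * An XTS key consists of two halves: the first keys the data cipher,
 * the second keys the tweak cipher. The first half is expanded both
 * for the bit-sliced NEON code and for the scalar cipher used for
 * ciphertext stealing; the second half goes to the tweak cipher.
 */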
static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
                            unsigned int key_len)
{
        struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
        int err;

        err = xts_verify_key(tfm, in_key, key_len);
        if (err)
                return err;

        key_len /= 2;
        err = crypto_cipher_setkey(ctx->cts_tfm, in_key, key_len);
        if (err)
                return err;
        err = crypto_cipher_setkey(ctx->tweak_tfm, in_key + key_len, key_len);
        if (err)
                return err;

        return aesbs_setkey(tfm, in_key, key_len);
}

static int xts_init(struct crypto_skcipher *tfm)
{
        struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);

        ctx->cts_tfm = crypto_alloc_cipher("aes", 0, 0);
        if (IS_ERR(ctx->cts_tfm))
                return PTR_ERR(ctx->cts_tfm);

        ctx->tweak_tfm = crypto_alloc_cipher("aes", 0, 0);
        if (IS_ERR(ctx->tweak_tfm))
                crypto_free_cipher(ctx->cts_tfm);

        return PTR_ERR_OR_ZERO(ctx->tweak_tfm);
}

static void xts_exit(struct crypto_skcipher *tfm)
{
        struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);

        crypto_free_cipher(ctx->tweak_tfm);
        crypto_free_cipher(ctx->cts_tfm);
}

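/*
 * XTS en/decryption. Requests whose length is not a multiple of the
 * block size use ciphertext stealing: the bulk of the data is handled
 * by the NEON code (with the last two tweaks swapped on decryption,
 * signalled via the trailing 'reorder_last_tweak' argument of fn),
 * and the final partial block is fixed up afterwards using the scalar
 * cts_tfm cipher.
 */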
static int __xts_crypt(struct skcipher_request *req, bool encrypt,
                       void (*fn)(u8 out[], u8 const in[], u8 const rk[],
                                  int rounds, int blocks, u8 iv[], int))
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
        int tail = req->cryptlen % AES_BLOCK_SIZE;
        struct skcipher_request subreq;
        u8 buf[2 * AES_BLOCK_SIZE];
        struct skcipher_walk walk;
        int err;

        if (req->cryptlen < AES_BLOCK_SIZE)
                return -EINVAL;

        if (unlikely(tail)) {
                skcipher_request_set_tfm(&subreq, tfm);
                skcipher_request_set_callback(&subreq,
                                              skcipher_request_flags(req),
                                              NULL, NULL);
                skcipher_request_set_crypt(&subreq, req->src, req->dst,
                                           req->cryptlen - tail, req->iv);
                req = &subreq;
        }

        err = skcipher_walk_virt(&walk, req, true);
        if (err)
                return err;

        crypto_cipher_encrypt_one(ctx->tweak_tfm, walk.iv, walk.iv);

        while (walk.nbytes >= AES_BLOCK_SIZE) {
                unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
                int reorder_last_tweak = !encrypt && tail > 0;

                if (walk.nbytes < walk.total) {
                        blocks = round_down(blocks,
                                            walk.stride / AES_BLOCK_SIZE);
                        reorder_last_tweak = 0;
                }

                kernel_neon_begin();
                fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->key.rk,
                   ctx->key.rounds, blocks, walk.iv, reorder_last_tweak);
                kernel_neon_end();
                err = skcipher_walk_done(&walk,
                                         walk.nbytes - blocks * AES_BLOCK_SIZE);
        }

        if (err || likely(!tail))
                return err;

        /* handle ciphertext stealing */
        scatterwalk_map_and_copy(buf, req->dst, req->cryptlen - AES_BLOCK_SIZE,
                                 AES_BLOCK_SIZE, 0);
        memcpy(buf + AES_BLOCK_SIZE, buf, tail);
        scatterwalk_map_and_copy(buf, req->src, req->cryptlen, tail, 0);

        crypto_xor(buf, req->iv, AES_BLOCK_SIZE);

        if (encrypt)
                crypto_cipher_encrypt_one(ctx->cts_tfm, buf, buf);
        else
                crypto_cipher_decrypt_one(ctx->cts_tfm, buf, buf);

        crypto_xor(buf, req->iv, AES_BLOCK_SIZE);

        scatterwalk_map_and_copy(buf, req->dst, req->cryptlen - AES_BLOCK_SIZE,
                                 AES_BLOCK_SIZE + tail, 1);
        return 0;
}

static int xts_encrypt(struct skcipher_request *req)
{
        return __xts_crypt(req, true, aesbs_xts_encrypt);
}

static int xts_decrypt(struct skcipher_request *req)
{
        return __xts_crypt(req, false, aesbs_xts_decrypt);
}

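/*
 * The "__" prefixed algorithms are marked CRYPTO_ALG_INTERNAL: they
 * may only run where the NEON unit is usable, so they are not exposed
 * to general users directly. aes_init() wraps each of them in a simd
 * skcipher that defers to an asynchronous helper whenever NEON is not
 * available in the calling context.
 */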
static struct skcipher_alg aes_algs[] = { {
        .base.cra_name          = "__ecb(aes)",
        .base.cra_driver_name   = "__ecb-aes-neonbs",
        .base.cra_priority      = 250,
        .base.cra_blocksize     = AES_BLOCK_SIZE,
        .base.cra_ctxsize       = sizeof(struct aesbs_ctx),
        .base.cra_module        = THIS_MODULE,
        .base.cra_flags         = CRYPTO_ALG_INTERNAL,

        .min_keysize            = AES_MIN_KEY_SIZE,
        .max_keysize            = AES_MAX_KEY_SIZE,
        .walksize               = 8 * AES_BLOCK_SIZE,
        .setkey                 = aesbs_setkey,
        .encrypt                = ecb_encrypt,
        .decrypt                = ecb_decrypt,
}, {
        .base.cra_name          = "__cbc(aes)",
        .base.cra_driver_name   = "__cbc-aes-neonbs",
        .base.cra_priority      = 250,
        .base.cra_blocksize     = AES_BLOCK_SIZE,
        .base.cra_ctxsize       = sizeof(struct aesbs_cbc_ctx),
        .base.cra_module        = THIS_MODULE,
        .base.cra_flags         = CRYPTO_ALG_INTERNAL,

        .min_keysize            = AES_MIN_KEY_SIZE,
        .max_keysize            = AES_MAX_KEY_SIZE,
        .walksize               = 8 * AES_BLOCK_SIZE,
        .ivsize                 = AES_BLOCK_SIZE,
        .setkey                 = aesbs_cbc_setkey,
        .encrypt                = cbc_encrypt,
        .decrypt                = cbc_decrypt,
        .init                   = cbc_init,
        .exit                   = cbc_exit,
}, {
        .base.cra_name          = "__ctr(aes)",
        .base.cra_driver_name   = "__ctr-aes-neonbs",
        .base.cra_priority      = 250,
        .base.cra_blocksize     = 1,
        .base.cra_ctxsize       = sizeof(struct aesbs_ctx),
        .base.cra_module        = THIS_MODULE,
        .base.cra_flags         = CRYPTO_ALG_INTERNAL,

        .min_keysize            = AES_MIN_KEY_SIZE,
        .max_keysize            = AES_MAX_KEY_SIZE,
        .chunksize              = AES_BLOCK_SIZE,
        .walksize               = 8 * AES_BLOCK_SIZE,
        .ivsize                 = AES_BLOCK_SIZE,
        .setkey                 = aesbs_setkey,
        .encrypt                = ctr_encrypt,
        .decrypt                = ctr_encrypt,
}, {
        .base.cra_name          = "ctr(aes)",
        .base.cra_driver_name   = "ctr-aes-neonbs-sync",
        .base.cra_priority      = 250 - 1,
        .base.cra_blocksize     = 1,
        .base.cra_ctxsize       = sizeof(struct aesbs_ctr_ctx),
        .base.cra_module        = THIS_MODULE,

        .min_keysize            = AES_MIN_KEY_SIZE,
        .max_keysize            = AES_MAX_KEY_SIZE,
        .chunksize              = AES_BLOCK_SIZE,
        .walksize               = 8 * AES_BLOCK_SIZE,
        .ivsize                 = AES_BLOCK_SIZE,
        .setkey                 = aesbs_ctr_setkey_sync,
        .encrypt                = ctr_encrypt_sync,
        .decrypt                = ctr_encrypt_sync,
}, {
        .base.cra_name          = "__xts(aes)",
        .base.cra_driver_name   = "__xts-aes-neonbs",
        .base.cra_priority      = 250,
        .base.cra_blocksize     = AES_BLOCK_SIZE,
        .base.cra_ctxsize       = sizeof(struct aesbs_xts_ctx),
        .base.cra_module        = THIS_MODULE,
        .base.cra_flags         = CRYPTO_ALG_INTERNAL,

        .min_keysize            = 2 * AES_MIN_KEY_SIZE,
        .max_keysize            = 2 * AES_MAX_KEY_SIZE,
        .walksize               = 8 * AES_BLOCK_SIZE,
        .ivsize                 = AES_BLOCK_SIZE,
        .setkey                 = aesbs_xts_setkey,
        .encrypt                = xts_encrypt,
        .decrypt                = xts_decrypt,
        .init                   = xts_init,
        .exit                   = xts_exit,
} };

static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)];

static void aes_exit(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(aes_simd_algs); i++)
                if (aes_simd_algs[i])
                        simd_skcipher_free(aes_simd_algs[i]);

        crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}

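/*
 * Register the internal algorithms, then create a simd wrapper for
 * each one (dropping the "__" prefix to form the public name). The
 * module only loads when the CPU advertises NEON support.
 */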
static int __init aes_init(void)
{
        struct simd_skcipher_alg *simd;
        const char *basename;
        const char *algname;
        const char *drvname;
        int err;
        int i;

        if (!(elf_hwcap & HWCAP_NEON))
                return -ENODEV;

        err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
        if (err)
                return err;

        for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
                if (!(aes_algs[i].base.cra_flags & CRYPTO_ALG_INTERNAL))
                        continue;

                algname = aes_algs[i].base.cra_name + 2;
                drvname = aes_algs[i].base.cra_driver_name + 2;
                basename = aes_algs[i].base.cra_driver_name;
                simd = simd_skcipher_create_compat(algname, drvname, basename);
                err = PTR_ERR(simd);
                if (IS_ERR(simd))
                        goto unregister_simds;

                aes_simd_algs[i] = simd;
        }
        return 0;

unregister_simds:
        aes_exit();
        return err;
}

late_initcall(aes_init);
module_exit(aes_exit);