arch/x86/crypto/aesni-intel_glue.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Support for Intel AES-NI instructions. This file contains glue
4  * code; the real AES implementation is in aesni-intel_asm.S.
5  *
6  * Copyright (C) 2008, Intel Corp.
7  *    Author: Huang Ying <ying.huang@intel.com>
8  *
9  * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
10  * interface for 64-bit kernels.
11  *    Authors: Adrian Hoban <adrian.hoban@intel.com>
12  *             Gabriele Paoloni <gabriele.paoloni@intel.com>
13  *             Tadeusz Struk (tadeusz.struk@intel.com)
14  *             Aidan O'Mahony (aidan.o.mahony@intel.com)
15  *    Copyright (c) 2010, Intel Corporation.
16  */
17
18 #include <linux/hardirq.h>
19 #include <linux/types.h>
20 #include <linux/module.h>
21 #include <linux/err.h>
22 #include <crypto/algapi.h>
23 #include <crypto/aes.h>
24 #include <crypto/ctr.h>
25 #include <crypto/b128ops.h>
26 #include <crypto/gcm.h>
27 #include <crypto/xts.h>
28 #include <asm/cpu_device_id.h>
29 #include <asm/simd.h>
30 #include <crypto/scatterwalk.h>
31 #include <crypto/internal/aead.h>
32 #include <crypto/internal/simd.h>
33 #include <crypto/internal/skcipher.h>
34 #include <linux/jump_label.h>
35 #include <linux/workqueue.h>
36 #include <linux/spinlock.h>
37 #include <linux/static_call.h>
38
39
40 #define AESNI_ALIGN     16
41 #define AESNI_ALIGN_ATTR __attribute__ ((__aligned__(AESNI_ALIGN)))
42 #define AES_BLOCK_MASK  (~(AES_BLOCK_SIZE - 1))
43 #define RFC4106_HASH_SUBKEY_SIZE 16
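/*
 * The crypto API only guarantees CRYPTO_MINALIGN alignment for the tfm
 * context, so the context sizes below reserve extra room and the pointer is
 * realigned to AESNI_ALIGN (16 bytes) at runtime (see aes_ctx()).
 */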
44 #define AESNI_ALIGN_EXTRA ((AESNI_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
45 #define CRYPTO_AES_CTX_SIZE (sizeof(struct crypto_aes_ctx) + AESNI_ALIGN_EXTRA)
46 #define XTS_AES_CTX_SIZE (sizeof(struct aesni_xts_ctx) + AESNI_ALIGN_EXTRA)
47
48 /* This data is stored at the end of the crypto_tfm struct.
49  * It holds per-"session" (per-transform) state and must be
50  * 16-byte aligned.
51  */
52 struct aesni_rfc4106_gcm_ctx {
53         u8 hash_subkey[16] AESNI_ALIGN_ATTR;
54         struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
55         u8 nonce[4];
56 };
57
58 struct generic_gcmaes_ctx {
59         u8 hash_subkey[16] AESNI_ALIGN_ATTR;
60         struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
61 };
62
63 struct aesni_xts_ctx {
64         u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
65         u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
66 };
67
68 #define GCM_BLOCK_LEN 16
69
70 struct gcm_context_data {
71         /* init, update and finalize context data */
72         u8 aad_hash[GCM_BLOCK_LEN];
73         u64 aad_length;
74         u64 in_length;
75         u8 partial_block_enc_key[GCM_BLOCK_LEN];
76         u8 orig_IV[GCM_BLOCK_LEN];
77         u8 current_counter[GCM_BLOCK_LEN];
78         u64 partial_block_len;
79         u64 unused;
80         u8 hash_keys[GCM_BLOCK_LEN * 16];
81 };
82
83 asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
84                              unsigned int key_len);
85 asmlinkage void aesni_enc(const void *ctx, u8 *out, const u8 *in);
86 asmlinkage void aesni_dec(const void *ctx, u8 *out, const u8 *in);
87 asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
88                               const u8 *in, unsigned int len);
89 asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
90                               const u8 *in, unsigned int len);
91 asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
92                               const u8 *in, unsigned int len, u8 *iv);
93 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
94                               const u8 *in, unsigned int len, u8 *iv);
95 asmlinkage void aesni_cts_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
96                                   const u8 *in, unsigned int len, u8 *iv);
97 asmlinkage void aesni_cts_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
98                                   const u8 *in, unsigned int len, u8 *iv);
99
100 #define AVX_GEN2_OPTSIZE 640
101 #define AVX_GEN4_OPTSIZE 4096
102
103 asmlinkage void aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *out,
104                                   const u8 *in, unsigned int len, u8 *iv);
105
106 asmlinkage void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *out,
107                                   const u8 *in, unsigned int len, u8 *iv);
108
109 #ifdef CONFIG_X86_64
110
111 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
112                               const u8 *in, unsigned int len, u8 *iv);
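/*
 * CTR dispatch: defaults to the plain AES-NI routine above and is switched
 * to the AVX "by8" implementation in aesni_init() when AVX is available.
 */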
113 DEFINE_STATIC_CALL(aesni_ctr_enc_tfm, aesni_ctr_enc);
114
115 /* Scatter / Gather routines, with args similar to above */
116 asmlinkage void aesni_gcm_init(void *ctx,
117                                struct gcm_context_data *gdata,
118                                u8 *iv,
119                                u8 *hash_subkey, const u8 *aad,
120                                unsigned long aad_len);
121 asmlinkage void aesni_gcm_enc_update(void *ctx,
122                                      struct gcm_context_data *gdata, u8 *out,
123                                      const u8 *in, unsigned long plaintext_len);
124 asmlinkage void aesni_gcm_dec_update(void *ctx,
125                                      struct gcm_context_data *gdata, u8 *out,
126                                      const u8 *in,
127                                      unsigned long ciphertext_len);
128 asmlinkage void aesni_gcm_finalize(void *ctx,
129                                    struct gcm_context_data *gdata,
130                                    u8 *auth_tag, unsigned long auth_tag_len);
131
132 asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
133                 void *keys, u8 *out, unsigned int num_bytes);
134 asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
135                 void *keys, u8 *out, unsigned int num_bytes);
136 asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
137                 void *keys, u8 *out, unsigned int num_bytes);
138 /*
139  * asmlinkage void aesni_gcm_init_avx_gen2()
140  * gcm_data *my_ctx_data: context data
141  * u8 *hash_subkey: the hash subkey input; data starts on a 16-byte boundary.
142  */
143 asmlinkage void aesni_gcm_init_avx_gen2(void *my_ctx_data,
144                                         struct gcm_context_data *gdata,
145                                         u8 *iv,
146                                         u8 *hash_subkey,
147                                         const u8 *aad,
148                                         unsigned long aad_len);
149
150 asmlinkage void aesni_gcm_enc_update_avx_gen2(void *ctx,
151                                      struct gcm_context_data *gdata, u8 *out,
152                                      const u8 *in, unsigned long plaintext_len);
153 asmlinkage void aesni_gcm_dec_update_avx_gen2(void *ctx,
154                                      struct gcm_context_data *gdata, u8 *out,
155                                      const u8 *in,
156                                      unsigned long ciphertext_len);
157 asmlinkage void aesni_gcm_finalize_avx_gen2(void *ctx,
158                                    struct gcm_context_data *gdata,
159                                    u8 *auth_tag, unsigned long auth_tag_len);
160
161 /*
162  * asmlinkage void aesni_gcm_init_avx_gen4()
163  * gcm_data *my_ctx_data: context data
164  * u8 *hash_subkey: the hash subkey input; data starts on a 16-byte boundary.
165  */
166 asmlinkage void aesni_gcm_init_avx_gen4(void *my_ctx_data,
167                                         struct gcm_context_data *gdata,
168                                         u8 *iv,
169                                         u8 *hash_subkey,
170                                         const u8 *aad,
171                                         unsigned long aad_len);
172
173 asmlinkage void aesni_gcm_enc_update_avx_gen4(void *ctx,
174                                      struct gcm_context_data *gdata, u8 *out,
175                                      const u8 *in, unsigned long plaintext_len);
176 asmlinkage void aesni_gcm_dec_update_avx_gen4(void *ctx,
177                                      struct gcm_context_data *gdata, u8 *out,
178                                      const u8 *in,
179                                      unsigned long ciphertext_len);
180 asmlinkage void aesni_gcm_finalize_avx_gen4(void *ctx,
181                                    struct gcm_context_data *gdata,
182                                    u8 *auth_tag, unsigned long auth_tag_len);
183
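/*
 * Static branches selecting among the SSE, AVX (gen2) and AVX2 (gen4) GCM
 * code paths; enabled once at module init based on CPU features.
 */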
184 static __ro_after_init DEFINE_STATIC_KEY_FALSE(gcm_use_avx);
185 static __ro_after_init DEFINE_STATIC_KEY_FALSE(gcm_use_avx2);
186
187 static inline struct
188 aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
189 {
190         unsigned long align = AESNI_ALIGN;
191
192         if (align <= crypto_tfm_ctx_alignment())
193                 align = 1;
194         return PTR_ALIGN(crypto_aead_ctx(tfm), align);
195 }
196
197 static inline struct
198 generic_gcmaes_ctx *generic_gcmaes_ctx_get(struct crypto_aead *tfm)
199 {
200         unsigned long align = AESNI_ALIGN;
201
202         if (align <= crypto_tfm_ctx_alignment())
203                 align = 1;
204         return PTR_ALIGN(crypto_aead_ctx(tfm), align);
205 }
206 #endif
207
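/*
 * Realign the raw context pointer to AESNI_ALIGN when the crypto API's own
 * guarantee (crypto_tfm_ctx_alignment()) is weaker.
 */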
208 static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
209 {
210         unsigned long addr = (unsigned long)raw_ctx;
211         unsigned long align = AESNI_ALIGN;
212
213         if (align <= crypto_tfm_ctx_alignment())
214                 align = 1;
215         return (struct crypto_aes_ctx *)ALIGN(addr, align);
216 }
217
218 static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
219                               const u8 *in_key, unsigned int key_len)
220 {
221         struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
222         int err;
223
224         if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
225             key_len != AES_KEYSIZE_256)
226                 return -EINVAL;
227
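        /*
         * Fall back to the generic key expansion when SIMD cannot be used
         * in the current context; otherwise run the AES-NI key schedule
         * with the FPU enabled.
         */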
228         if (!crypto_simd_usable())
229                 err = aes_expandkey(ctx, in_key, key_len);
230         else {
231                 kernel_fpu_begin();
232                 err = aesni_set_key(ctx, in_key, key_len);
233                 kernel_fpu_end();
234         }
235
236         return err;
237 }
238
239 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
240                        unsigned int key_len)
241 {
242         return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
243 }
244
245 static void aesni_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
246 {
247         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
248
249         if (!crypto_simd_usable()) {
250                 aes_encrypt(ctx, dst, src);
251         } else {
252                 kernel_fpu_begin();
253                 aesni_enc(ctx, dst, src);
254                 kernel_fpu_end();
255         }
256 }
257
258 static void aesni_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
259 {
260         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
261
262         if (!crypto_simd_usable()) {
263                 aes_decrypt(ctx, dst, src);
264         } else {
265                 kernel_fpu_begin();
266                 aesni_dec(ctx, dst, src);
267                 kernel_fpu_end();
268         }
269 }
270
271 static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
272                                  unsigned int len)
273 {
274         return aes_set_key_common(crypto_skcipher_tfm(tfm),
275                                   crypto_skcipher_ctx(tfm), key, len);
276 }
277
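/*
 * The ECB/CBC helpers below share one pattern: the skcipher walk hands back
 * virtually mapped chunks, whole blocks are processed under
 * kernel_fpu_begin()/kernel_fpu_end(), and any partial remainder is returned
 * to the walker for the next iteration.
 */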
278 static int ecb_encrypt(struct skcipher_request *req)
279 {
280         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
281         struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
282         struct skcipher_walk walk;
283         unsigned int nbytes;
284         int err;
285
286         err = skcipher_walk_virt(&walk, req, false);
287
288         while ((nbytes = walk.nbytes)) {
289                 kernel_fpu_begin();
290                 aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
291                               nbytes & AES_BLOCK_MASK);
292                 kernel_fpu_end();
293                 nbytes &= AES_BLOCK_SIZE - 1;
294                 err = skcipher_walk_done(&walk, nbytes);
295         }
296
297         return err;
298 }
299
300 static int ecb_decrypt(struct skcipher_request *req)
301 {
302         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
303         struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
304         struct skcipher_walk walk;
305         unsigned int nbytes;
306         int err;
307
308         err = skcipher_walk_virt(&walk, req, false);
309
310         while ((nbytes = walk.nbytes)) {
311                 kernel_fpu_begin();
312                 aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
313                               nbytes & AES_BLOCK_MASK);
314                 kernel_fpu_end();
315                 nbytes &= AES_BLOCK_SIZE - 1;
316                 err = skcipher_walk_done(&walk, nbytes);
317         }
318
319         return err;
320 }
321
322 static int cbc_encrypt(struct skcipher_request *req)
323 {
324         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
325         struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
326         struct skcipher_walk walk;
327         unsigned int nbytes;
328         int err;
329
330         err = skcipher_walk_virt(&walk, req, false);
331
332         while ((nbytes = walk.nbytes)) {
333                 kernel_fpu_begin();
334                 aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
335                               nbytes & AES_BLOCK_MASK, walk.iv);
336                 kernel_fpu_end();
337                 nbytes &= AES_BLOCK_SIZE - 1;
338                 err = skcipher_walk_done(&walk, nbytes);
339         }
340
341         return err;
342 }
343
344 static int cbc_decrypt(struct skcipher_request *req)
345 {
346         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
347         struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
348         struct skcipher_walk walk;
349         unsigned int nbytes;
350         int err;
351
352         err = skcipher_walk_virt(&walk, req, false);
353
354         while ((nbytes = walk.nbytes)) {
355                 kernel_fpu_begin();
356                 aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
357                               nbytes & AES_BLOCK_MASK, walk.iv);
358                 kernel_fpu_end();
359                 nbytes &= AES_BLOCK_SIZE - 1;
360                 err = skcipher_walk_done(&walk, nbytes);
361         }
362
363         return err;
364 }
365
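/*
 * CTS(CBC): all but the last two blocks are handled as regular CBC via a
 * subrequest; the final (possibly partial) two blocks go through the
 * dedicated ciphertext-stealing asm routines.
 */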
366 static int cts_cbc_encrypt(struct skcipher_request *req)
367 {
368         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
369         struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
370         int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
371         struct scatterlist *src = req->src, *dst = req->dst;
372         struct scatterlist sg_src[2], sg_dst[2];
373         struct skcipher_request subreq;
374         struct skcipher_walk walk;
375         int err;
376
377         skcipher_request_set_tfm(&subreq, tfm);
378         skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
379                                       NULL, NULL);
380
381         if (req->cryptlen <= AES_BLOCK_SIZE) {
382                 if (req->cryptlen < AES_BLOCK_SIZE)
383                         return -EINVAL;
384                 cbc_blocks = 1;
385         }
386
387         if (cbc_blocks > 0) {
388                 skcipher_request_set_crypt(&subreq, req->src, req->dst,
389                                            cbc_blocks * AES_BLOCK_SIZE,
390                                            req->iv);
391
392                 err = cbc_encrypt(&subreq);
393                 if (err)
394                         return err;
395
396                 if (req->cryptlen == AES_BLOCK_SIZE)
397                         return 0;
398
399                 dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
400                 if (req->dst != req->src)
401                         dst = scatterwalk_ffwd(sg_dst, req->dst,
402                                                subreq.cryptlen);
403         }
404
405         /* handle ciphertext stealing */
406         skcipher_request_set_crypt(&subreq, src, dst,
407                                    req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
408                                    req->iv);
409
410         err = skcipher_walk_virt(&walk, &subreq, false);
411         if (err)
412                 return err;
413
414         kernel_fpu_begin();
415         aesni_cts_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
416                           walk.nbytes, walk.iv);
417         kernel_fpu_end();
418
419         return skcipher_walk_done(&walk, 0);
420 }
421
422 static int cts_cbc_decrypt(struct skcipher_request *req)
423 {
424         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
425         struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
426         int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
427         struct scatterlist *src = req->src, *dst = req->dst;
428         struct scatterlist sg_src[2], sg_dst[2];
429         struct skcipher_request subreq;
430         struct skcipher_walk walk;
431         int err;
432
433         skcipher_request_set_tfm(&subreq, tfm);
434         skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
435                                       NULL, NULL);
436
437         if (req->cryptlen <= AES_BLOCK_SIZE) {
438                 if (req->cryptlen < AES_BLOCK_SIZE)
439                         return -EINVAL;
440                 cbc_blocks = 1;
441         }
442
443         if (cbc_blocks > 0) {
444                 skcipher_request_set_crypt(&subreq, req->src, req->dst,
445                                            cbc_blocks * AES_BLOCK_SIZE,
446                                            req->iv);
447
448                 err = cbc_decrypt(&subreq);
449                 if (err)
450                         return err;
451
452                 if (req->cryptlen == AES_BLOCK_SIZE)
453                         return 0;
454
455                 dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
456                 if (req->dst != req->src)
457                         dst = scatterwalk_ffwd(sg_dst, req->dst,
458                                                subreq.cryptlen);
459         }
460
461         /* handle ciphertext stealing */
462         skcipher_request_set_crypt(&subreq, src, dst,
463                                    req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
464                                    req->iv);
465
466         err = skcipher_walk_virt(&walk, &subreq, false);
467         if (err)
468                 return err;
469
470         kernel_fpu_begin();
471         aesni_cts_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
472                           walk.nbytes, walk.iv);
473         kernel_fpu_end();
474
475         return skcipher_walk_done(&walk, 0);
476 }
477
478 #ifdef CONFIG_X86_64
479 static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
480                               const u8 *in, unsigned int len, u8 *iv)
481 {
482         /*
483          * Based on the key length, dispatch to the "by8" version of CTR
484          * mode encryption/decryption for improved performance.
485          * aes_set_key_common() ensures that the key length is one of
486          * {128, 192, 256} bits.
487          */
488         if (ctx->key_length == AES_KEYSIZE_128)
489                 aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
490         else if (ctx->key_length == AES_KEYSIZE_192)
491                 aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
492         else
493                 aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
494 }
495
496 static int ctr_crypt(struct skcipher_request *req)
497 {
498         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
499         struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
500         u8 keystream[AES_BLOCK_SIZE];
501         struct skcipher_walk walk;
502         unsigned int nbytes;
503         int err;
504
505         err = skcipher_walk_virt(&walk, req, false);
506
507         while ((nbytes = walk.nbytes) > 0) {
508                 kernel_fpu_begin();
509                 if (nbytes & AES_BLOCK_MASK)
510                         static_call(aesni_ctr_enc_tfm)(ctx, walk.dst.virt.addr,
511                                                        walk.src.virt.addr,
512                                                        nbytes & AES_BLOCK_MASK,
513                                                        walk.iv);
514                 nbytes &= ~AES_BLOCK_MASK;
515
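                /*
                 * Final partial block: encrypt the current counter block and
                 * XOR the resulting keystream into the tail.
                 */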
516                 if (walk.nbytes == walk.total && nbytes > 0) {
517                         aesni_enc(ctx, keystream, walk.iv);
518                         crypto_xor_cpy(walk.dst.virt.addr + walk.nbytes - nbytes,
519                                        walk.src.virt.addr + walk.nbytes - nbytes,
520                                        keystream, nbytes);
521                         crypto_inc(walk.iv, AES_BLOCK_SIZE);
522                         nbytes = 0;
523                 }
524                 kernel_fpu_end();
525                 err = skcipher_walk_done(&walk, nbytes);
526         }
527         return err;
528 }
529
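/*
 * Derive the GHASH hash subkey H = AES-ENC(key, 0^128), as required by the
 * GCM specification.
 */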
530 static int
531 rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
532 {
533         struct crypto_aes_ctx ctx;
534         int ret;
535
536         ret = aes_expandkey(&ctx, key, key_len);
537         if (ret)
538                 return ret;
539
540         /* Clear the hash subkey container to zero: encrypting an */
541         /* all-zero block with the cipher key produces the hash subkey. */
542         memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
543
544         aes_encrypt(&ctx, hash_subkey, hash_subkey);
545
546         memzero_explicit(&ctx, sizeof(ctx));
547         return 0;
548 }
549
550 static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
551                                   unsigned int key_len)
552 {
553         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);
554
555         if (key_len < 4)
556                 return -EINVAL;
557
558         /* Account for the 4-byte nonce at the end. */
559         key_len -= 4;
560
561         memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
562
563         return aes_set_key_common(crypto_aead_tfm(aead),
564                                   &ctx->aes_key_expanded, key, key_len) ?:
565                rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
566 }
567
568 /* This is the Integrity Check Value (aka the authentication tag) length and can
569  * be 8, 12 or 16 bytes long. */
570 static int common_rfc4106_set_authsize(struct crypto_aead *aead,
571                                        unsigned int authsize)
572 {
573         switch (authsize) {
574         case 8:
575         case 12:
576         case 16:
577                 break;
578         default:
579                 return -EINVAL;
580         }
581
582         return 0;
583 }
584
585 static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
586                                        unsigned int authsize)
587 {
588         switch (authsize) {
589         case 4:
590         case 8:
591         case 12:
592         case 13:
593         case 14:
594         case 15:
595         case 16:
596                 break;
597         default:
598                 return -EINVAL;
599         }
600
601         return 0;
602 }
603
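/*
 * Common GCM path: linearize the AAD if necessary, then drive the asm
 * init/update/finalize sequence over a scatterlist walk.  The AVX and AVX2
 * variants only pay off above their size thresholds (AVX_GEN2_OPTSIZE and
 * AVX_GEN4_OPTSIZE respectively).
 */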
604 static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
605                               unsigned int assoclen, u8 *hash_subkey,
606                               u8 *iv, void *aes_ctx, u8 *auth_tag,
607                               unsigned long auth_tag_len)
608 {
609         u8 databuf[sizeof(struct gcm_context_data) + (AESNI_ALIGN - 8)] __aligned(8);
610         struct gcm_context_data *data = PTR_ALIGN((void *)databuf, AESNI_ALIGN);
611         unsigned long left = req->cryptlen;
612         struct scatter_walk assoc_sg_walk;
613         struct skcipher_walk walk;
614         bool do_avx, do_avx2;
615         u8 *assocmem = NULL;
616         u8 *assoc;
617         int err;
618
619         if (!enc)
620                 left -= auth_tag_len;
621
622         do_avx = (left >= AVX_GEN2_OPTSIZE);
623         do_avx2 = (left >= AVX_GEN4_OPTSIZE);
624
625         /* Linearize assoc, if not already linear */
626         if (req->src->length >= assoclen && req->src->length) {
627                 scatterwalk_start(&assoc_sg_walk, req->src);
628                 assoc = scatterwalk_map(&assoc_sg_walk);
629         } else {
630                 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
631                               GFP_KERNEL : GFP_ATOMIC;
632
633                 /* assoc can be any length, so must be on heap */
634                 assocmem = kmalloc(assoclen, flags);
635                 if (unlikely(!assocmem))
636                         return -ENOMEM;
637                 assoc = assocmem;
638
639                 scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
640         }
641
642         kernel_fpu_begin();
643         if (static_branch_likely(&gcm_use_avx2) && do_avx2)
644                 aesni_gcm_init_avx_gen4(aes_ctx, data, iv, hash_subkey, assoc,
645                                         assoclen);
646         else if (static_branch_likely(&gcm_use_avx) && do_avx)
647                 aesni_gcm_init_avx_gen2(aes_ctx, data, iv, hash_subkey, assoc,
648                                         assoclen);
649         else
650                 aesni_gcm_init(aes_ctx, data, iv, hash_subkey, assoc, assoclen);
651         kernel_fpu_end();
652
653         if (!assocmem)
654                 scatterwalk_unmap(assoc);
655         else
656                 kfree(assocmem);
657
658         err = enc ? skcipher_walk_aead_encrypt(&walk, req, false)
659                   : skcipher_walk_aead_decrypt(&walk, req, false);
660
661         while (walk.nbytes > 0) {
662                 kernel_fpu_begin();
663                 if (static_branch_likely(&gcm_use_avx2) && do_avx2) {
664                         if (enc)
665                                 aesni_gcm_enc_update_avx_gen4(aes_ctx, data,
666                                                               walk.dst.virt.addr,
667                                                               walk.src.virt.addr,
668                                                               walk.nbytes);
669                         else
670                                 aesni_gcm_dec_update_avx_gen4(aes_ctx, data,
671                                                               walk.dst.virt.addr,
672                                                               walk.src.virt.addr,
673                                                               walk.nbytes);
674                 } else if (static_branch_likely(&gcm_use_avx) && do_avx) {
675                         if (enc)
676                                 aesni_gcm_enc_update_avx_gen2(aes_ctx, data,
677                                                               walk.dst.virt.addr,
678                                                               walk.src.virt.addr,
679                                                               walk.nbytes);
680                         else
681                                 aesni_gcm_dec_update_avx_gen2(aes_ctx, data,
682                                                               walk.dst.virt.addr,
683                                                               walk.src.virt.addr,
684                                                               walk.nbytes);
685                 } else if (enc) {
686                         aesni_gcm_enc_update(aes_ctx, data, walk.dst.virt.addr,
687                                              walk.src.virt.addr, walk.nbytes);
688                 } else {
689                         aesni_gcm_dec_update(aes_ctx, data, walk.dst.virt.addr,
690                                              walk.src.virt.addr, walk.nbytes);
691                 }
692                 kernel_fpu_end();
693
694                 err = skcipher_walk_done(&walk, 0);
695         }
696
697         if (err)
698                 return err;
699
700         kernel_fpu_begin();
701         if (static_branch_likely(&gcm_use_avx2) && do_avx2)
702                 aesni_gcm_finalize_avx_gen4(aes_ctx, data, auth_tag,
703                                             auth_tag_len);
704         else if (static_branch_likely(&gcm_use_avx) && do_avx)
705                 aesni_gcm_finalize_avx_gen2(aes_ctx, data, auth_tag,
706                                             auth_tag_len);
707         else
708                 aesni_gcm_finalize(aes_ctx, data, auth_tag, auth_tag_len);
709         kernel_fpu_end();
710
711         return 0;
712 }
713
714 static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
715                           u8 *hash_subkey, u8 *iv, void *aes_ctx)
716 {
717         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
718         unsigned long auth_tag_len = crypto_aead_authsize(tfm);
719         u8 auth_tag[16];
720         int err;
721
722         err = gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv, aes_ctx,
723                                  auth_tag, auth_tag_len);
724         if (err)
725                 return err;
726
727         scatterwalk_map_and_copy(auth_tag, req->dst,
728                                  req->assoclen + req->cryptlen,
729                                  auth_tag_len, 1);
730         return 0;
731 }
732
733 static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
734                           u8 *hash_subkey, u8 *iv, void *aes_ctx)
735 {
736         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
737         unsigned long auth_tag_len = crypto_aead_authsize(tfm);
738         u8 auth_tag_msg[16];
739         u8 auth_tag[16];
740         int err;
741
742         err = gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv, aes_ctx,
743                                  auth_tag, auth_tag_len);
744         if (err)
745                 return err;
746
747         /* Copy out original auth_tag */
748         scatterwalk_map_and_copy(auth_tag_msg, req->src,
749                                  req->assoclen + req->cryptlen - auth_tag_len,
750                                  auth_tag_len, 0);
751
752         /* Compare generated tag with passed in tag. */
753         if (crypto_memneq(auth_tag_msg, auth_tag, auth_tag_len)) {
754                 memzero_explicit(auth_tag, sizeof(auth_tag));
755                 return -EBADMSG;
756         }
757         return 0;
758 }
759
760 static int helper_rfc4106_encrypt(struct aead_request *req)
761 {
762         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
763         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
764         void *aes_ctx = &(ctx->aes_key_expanded);
765         u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
766         u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
767         unsigned int i;
768         __be32 counter = cpu_to_be32(1);
769
770         /* Assuming we support RFC 4106 64-bit extended sequence */
771         /* numbers, the AAD length must be equal to 16 or 20 bytes. */
773         if (unlikely(req->assoclen != 16 && req->assoclen != 20))
774                 return -EINVAL;
775
776         /* Build the IV: 4-byte nonce || 8-byte explicit IV || 32-bit BE counter = 1 */
777         for (i = 0; i < 4; i++)
778                 *(iv+i) = ctx->nonce[i];
779         for (i = 0; i < 8; i++)
780                 *(iv+4+i) = req->iv[i];
781         *((__be32 *)(iv+12)) = counter;
782
783         return gcmaes_encrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
784                               aes_ctx);
785 }
786
787 static int helper_rfc4106_decrypt(struct aead_request *req)
788 {
789         __be32 counter = cpu_to_be32(1);
790         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
791         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
792         void *aes_ctx = &(ctx->aes_key_expanded);
793         u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
794         u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
795         unsigned int i;
796
797         if (unlikely(req->assoclen != 16 && req->assoclen != 20))
798                 return -EINVAL;
799
800         /* Assuming we support RFC 4106 64-bit extended sequence */
801         /* numbers, the AAD length must be equal to 16 or 20 bytes */
802         /* (checked above). */
803
804         /* Build the IV: 4-byte nonce || 8-byte explicit IV || 32-bit BE counter = 1 */
805         for (i = 0; i < 4; i++)
806                 *(iv+i) = ctx->nonce[i];
807         for (i = 0; i < 8; i++)
808                 *(iv+4+i) = req->iv[i];
809         *((__be32 *)(iv+12)) = counter;
810
811         return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
812                               aes_ctx);
813 }
814 #endif
815
816 static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
817                             unsigned int keylen)
818 {
819         struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
820         int err;
821
822         err = xts_verify_key(tfm, key, keylen);
823         if (err)
824                 return err;
825
826         keylen /= 2;
827
828         /* first half of xts-key is for crypt */
829         err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
830                                  key, keylen);
831         if (err)
832                 return err;
833
834         /* second half of xts-key is for tweak */
835         return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
836                                   key + keylen, keylen);
837 }
838
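/*
 * XTS: the IV is encrypted with the tweak key to form the initial tweak.
 * If the length is not a multiple of the block size, the last two blocks
 * are withheld from the main pass and processed together so the asm routine
 * can perform ciphertext stealing.
 */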
839 static int xts_crypt(struct skcipher_request *req, bool encrypt)
840 {
841         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
842         struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
843         int tail = req->cryptlen % AES_BLOCK_SIZE;
844         struct skcipher_request subreq;
845         struct skcipher_walk walk;
846         int err;
847
848         if (req->cryptlen < AES_BLOCK_SIZE)
849                 return -EINVAL;
850
851         err = skcipher_walk_virt(&walk, req, false);
852         if (!walk.nbytes)
853                 return err;
854
855         if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
856                 int blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
857
858                 skcipher_walk_abort(&walk);
859
860                 skcipher_request_set_tfm(&subreq, tfm);
861                 skcipher_request_set_callback(&subreq,
862                                               skcipher_request_flags(req),
863                                               NULL, NULL);
864                 skcipher_request_set_crypt(&subreq, req->src, req->dst,
865                                            blocks * AES_BLOCK_SIZE, req->iv);
866                 req = &subreq;
867
868                 err = skcipher_walk_virt(&walk, req, false);
869                 if (err)
870                         return err;
871         } else {
872                 tail = 0;
873         }
874
875         kernel_fpu_begin();
876
877         /* calculate first value of T */
878         aesni_enc(aes_ctx(ctx->raw_tweak_ctx), walk.iv, walk.iv);
879
880         while (walk.nbytes > 0) {
881                 int nbytes = walk.nbytes;
882
883                 if (nbytes < walk.total)
884                         nbytes &= ~(AES_BLOCK_SIZE - 1);
885
886                 if (encrypt)
887                         aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx),
888                                           walk.dst.virt.addr, walk.src.virt.addr,
889                                           nbytes, walk.iv);
890                 else
891                         aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx),
892                                           walk.dst.virt.addr, walk.src.virt.addr,
893                                           nbytes, walk.iv);
894                 kernel_fpu_end();
895
896                 err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
897
898                 if (walk.nbytes > 0)
899                         kernel_fpu_begin();
900         }
901
902         if (unlikely(tail > 0 && !err)) {
903                 struct scatterlist sg_src[2], sg_dst[2];
904                 struct scatterlist *src, *dst;
905
906                 dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
907                 if (req->dst != req->src)
908                         dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
909
910                 skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
911                                            req->iv);
912
913                 err = skcipher_walk_virt(&walk, &subreq, false);
914                 if (err)
915                         return err;
916
917                 kernel_fpu_begin();
918                 if (encrypt)
919                         aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx),
920                                           walk.dst.virt.addr, walk.src.virt.addr,
921                                           walk.nbytes, walk.iv);
922                 else
923                         aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx),
924                                           walk.dst.virt.addr, walk.src.virt.addr,
925                                           walk.nbytes, walk.iv);
926                 kernel_fpu_end();
927
928                 err = skcipher_walk_done(&walk, 0);
929         }
930         return err;
931 }
932
933 static int xts_encrypt(struct skcipher_request *req)
934 {
935         return xts_crypt(req, true);
936 }
937
938 static int xts_decrypt(struct skcipher_request *req)
939 {
940         return xts_crypt(req, false);
941 }
942
943 static struct crypto_alg aesni_cipher_alg = {
944         .cra_name               = "aes",
945         .cra_driver_name        = "aes-aesni",
946         .cra_priority           = 300,
947         .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
948         .cra_blocksize          = AES_BLOCK_SIZE,
949         .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
950         .cra_module             = THIS_MODULE,
951         .cra_u  = {
952                 .cipher = {
953                         .cia_min_keysize        = AES_MIN_KEY_SIZE,
954                         .cia_max_keysize        = AES_MAX_KEY_SIZE,
955                         .cia_setkey             = aes_set_key,
956                         .cia_encrypt            = aesni_encrypt,
957                         .cia_decrypt            = aesni_decrypt
958                 }
959         }
960 };
961
962 static struct skcipher_alg aesni_skciphers[] = {
963         {
964                 .base = {
965                         .cra_name               = "__ecb(aes)",
966                         .cra_driver_name        = "__ecb-aes-aesni",
967                         .cra_priority           = 400,
968                         .cra_flags              = CRYPTO_ALG_INTERNAL,
969                         .cra_blocksize          = AES_BLOCK_SIZE,
970                         .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
971                         .cra_module             = THIS_MODULE,
972                 },
973                 .min_keysize    = AES_MIN_KEY_SIZE,
974                 .max_keysize    = AES_MAX_KEY_SIZE,
975                 .setkey         = aesni_skcipher_setkey,
976                 .encrypt        = ecb_encrypt,
977                 .decrypt        = ecb_decrypt,
978         }, {
979                 .base = {
980                         .cra_name               = "__cbc(aes)",
981                         .cra_driver_name        = "__cbc-aes-aesni",
982                         .cra_priority           = 400,
983                         .cra_flags              = CRYPTO_ALG_INTERNAL,
984                         .cra_blocksize          = AES_BLOCK_SIZE,
985                         .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
986                         .cra_module             = THIS_MODULE,
987                 },
988                 .min_keysize    = AES_MIN_KEY_SIZE,
989                 .max_keysize    = AES_MAX_KEY_SIZE,
990                 .ivsize         = AES_BLOCK_SIZE,
991                 .setkey         = aesni_skcipher_setkey,
992                 .encrypt        = cbc_encrypt,
993                 .decrypt        = cbc_decrypt,
994         }, {
995                 .base = {
996                         .cra_name               = "__cts(cbc(aes))",
997                         .cra_driver_name        = "__cts-cbc-aes-aesni",
998                         .cra_priority           = 400,
999                         .cra_flags              = CRYPTO_ALG_INTERNAL,
1000                         .cra_blocksize          = AES_BLOCK_SIZE,
1001                         .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
1002                         .cra_module             = THIS_MODULE,
1003                 },
1004                 .min_keysize    = AES_MIN_KEY_SIZE,
1005                 .max_keysize    = AES_MAX_KEY_SIZE,
1006                 .ivsize         = AES_BLOCK_SIZE,
1007                 .walksize       = 2 * AES_BLOCK_SIZE,
1008                 .setkey         = aesni_skcipher_setkey,
1009                 .encrypt        = cts_cbc_encrypt,
1010                 .decrypt        = cts_cbc_decrypt,
1011 #ifdef CONFIG_X86_64
1012         }, {
1013                 .base = {
1014                         .cra_name               = "__ctr(aes)",
1015                         .cra_driver_name        = "__ctr-aes-aesni",
1016                         .cra_priority           = 400,
1017                         .cra_flags              = CRYPTO_ALG_INTERNAL,
1018                         .cra_blocksize          = 1,
1019                         .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
1020                         .cra_module             = THIS_MODULE,
1021                 },
1022                 .min_keysize    = AES_MIN_KEY_SIZE,
1023                 .max_keysize    = AES_MAX_KEY_SIZE,
1024                 .ivsize         = AES_BLOCK_SIZE,
1025                 .chunksize      = AES_BLOCK_SIZE,
1026                 .setkey         = aesni_skcipher_setkey,
1027                 .encrypt        = ctr_crypt,
1028                 .decrypt        = ctr_crypt,
1029 #endif
1030         }, {
1031                 .base = {
1032                         .cra_name               = "__xts(aes)",
1033                         .cra_driver_name        = "__xts-aes-aesni",
1034                         .cra_priority           = 401,
1035                         .cra_flags              = CRYPTO_ALG_INTERNAL,
1036                         .cra_blocksize          = AES_BLOCK_SIZE,
1037                         .cra_ctxsize            = XTS_AES_CTX_SIZE,
1038                         .cra_module             = THIS_MODULE,
1039                 },
1040                 .min_keysize    = 2 * AES_MIN_KEY_SIZE,
1041                 .max_keysize    = 2 * AES_MAX_KEY_SIZE,
1042                 .ivsize         = AES_BLOCK_SIZE,
1043                 .walksize       = 2 * AES_BLOCK_SIZE,
1044                 .setkey         = xts_aesni_setkey,
1045                 .encrypt        = xts_encrypt,
1046                 .decrypt        = xts_decrypt,
1047         }
1048 };
1049
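/*
 * The "__"-prefixed algorithms above are internal-only; the simd_register_*()
 * helpers wrap them so users see the usual names (e.g. "xts(aes)") and
 * requests from SIMD-unsafe contexts are deferred to cryptd.
 */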
1050 static
1051 struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];
1052
1053 #ifdef CONFIG_X86_64
1054 static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key,
1055                                   unsigned int key_len)
1056 {
1057         struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(aead);
1058
1059         return aes_set_key_common(crypto_aead_tfm(aead),
1060                                   &ctx->aes_key_expanded, key, key_len) ?:
1061                rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
1062 }
1063
1064 static int generic_gcmaes_encrypt(struct aead_request *req)
1065 {
1066         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1067         struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
1068         void *aes_ctx = &(ctx->aes_key_expanded);
1069         u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
1070         u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
1071         __be32 counter = cpu_to_be32(1);
1072
1073         memcpy(iv, req->iv, 12);
1074         *((__be32 *)(iv+12)) = counter;
1075
1076         return gcmaes_encrypt(req, req->assoclen, ctx->hash_subkey, iv,
1077                               aes_ctx);
1078 }
1079
1080 static int generic_gcmaes_decrypt(struct aead_request *req)
1081 {
1082         __be32 counter = cpu_to_be32(1);
1083         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1084         struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
1085         void *aes_ctx = &(ctx->aes_key_expanded);
1086         u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
1087         u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
1088
1089         memcpy(iv, req->iv, 12);
1090         *((__be32 *)(iv+12)) = counter;
1091
1092         return gcmaes_decrypt(req, req->assoclen, ctx->hash_subkey, iv,
1093                               aes_ctx);
1094 }
1095
1096 static struct aead_alg aesni_aeads[] = { {
1097         .setkey                 = common_rfc4106_set_key,
1098         .setauthsize            = common_rfc4106_set_authsize,
1099         .encrypt                = helper_rfc4106_encrypt,
1100         .decrypt                = helper_rfc4106_decrypt,
1101         .ivsize                 = GCM_RFC4106_IV_SIZE,
1102         .maxauthsize            = 16,
1103         .base = {
1104                 .cra_name               = "__rfc4106(gcm(aes))",
1105                 .cra_driver_name        = "__rfc4106-gcm-aesni",
1106                 .cra_priority           = 400,
1107                 .cra_flags              = CRYPTO_ALG_INTERNAL,
1108                 .cra_blocksize          = 1,
1109                 .cra_ctxsize            = sizeof(struct aesni_rfc4106_gcm_ctx),
1110                 .cra_alignmask          = AESNI_ALIGN - 1,
1111                 .cra_module             = THIS_MODULE,
1112         },
1113 }, {
1114         .setkey                 = generic_gcmaes_set_key,
1115         .setauthsize            = generic_gcmaes_set_authsize,
1116         .encrypt                = generic_gcmaes_encrypt,
1117         .decrypt                = generic_gcmaes_decrypt,
1118         .ivsize                 = GCM_AES_IV_SIZE,
1119         .maxauthsize            = 16,
1120         .base = {
1121                 .cra_name               = "__gcm(aes)",
1122                 .cra_driver_name        = "__generic-gcm-aesni",
1123                 .cra_priority           = 400,
1124                 .cra_flags              = CRYPTO_ALG_INTERNAL,
1125                 .cra_blocksize          = 1,
1126                 .cra_ctxsize            = sizeof(struct generic_gcmaes_ctx),
1127                 .cra_alignmask          = AESNI_ALIGN - 1,
1128                 .cra_module             = THIS_MODULE,
1129         },
1130 } };
1131 #else
1132 static struct aead_alg aesni_aeads[0];
1133 #endif
1134
1135 static struct simd_aead_alg *aesni_simd_aeads[ARRAY_SIZE(aesni_aeads)];
1136
1137 static const struct x86_cpu_id aesni_cpu_id[] = {
1138         X86_MATCH_FEATURE(X86_FEATURE_AES, NULL),
1139         {}
1140 };
1141 MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
1142
1143 static int __init aesni_init(void)
1144 {
1145         int err;
1146
1147         if (!x86_match_cpu(aesni_cpu_id))
1148                 return -ENODEV;
1149 #ifdef CONFIG_X86_64
1150         if (boot_cpu_has(X86_FEATURE_AVX2)) {
1151                 pr_info("AVX2 version of gcm_enc/dec engaged.\n");
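                /*
                 * Also enable the AVX key so that requests below the AVX2
                 * size threshold (AVX_GEN4_OPTSIZE) can still take the
                 * AVX (gen2) path.
                 */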
1152                 static_branch_enable(&gcm_use_avx);
1153                 static_branch_enable(&gcm_use_avx2);
1154         } else if (boot_cpu_has(X86_FEATURE_AVX)) {
1156                 pr_info("AVX version of gcm_enc/dec engaged.\n");
1157                 static_branch_enable(&gcm_use_avx);
1158         } else {
1159                 pr_info("SSE version of gcm_enc/dec engaged.\n");
1160         }
1161         if (boot_cpu_has(X86_FEATURE_AVX)) {
1162                 /* optimize performance of ctr mode encryption transform */
1163                 static_call_update(aesni_ctr_enc_tfm, aesni_ctr_enc_avx_tfm);
1164                 pr_info("AES CTR mode by8 optimization enabled\n");
1165         }
1166 #endif
1167
1168         err = crypto_register_alg(&aesni_cipher_alg);
1169         if (err)
1170                 return err;
1171
1172         err = simd_register_skciphers_compat(aesni_skciphers,
1173                                              ARRAY_SIZE(aesni_skciphers),
1174                                              aesni_simd_skciphers);
1175         if (err)
1176                 goto unregister_cipher;
1177
1178         err = simd_register_aeads_compat(aesni_aeads, ARRAY_SIZE(aesni_aeads),
1179                                          aesni_simd_aeads);
1180         if (err)
1181                 goto unregister_skciphers;
1182
1183         return 0;
1184
1185 unregister_skciphers:
1186         simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
1187                                   aesni_simd_skciphers);
1188 unregister_cipher:
1189         crypto_unregister_alg(&aesni_cipher_alg);
1190         return err;
1191 }
1192
1193 static void __exit aesni_exit(void)
1194 {
1195         simd_unregister_aeads(aesni_aeads, ARRAY_SIZE(aesni_aeads),
1196                               aesni_simd_aeads);
1197         simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
1198                                   aesni_simd_skciphers);
1199         crypto_unregister_alg(&aesni_cipher_alg);
1200 }
1201
1202 late_initcall(aesni_init);
1203 module_exit(aesni_exit);
1204
1205 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
1206 MODULE_LICENSE("GPL");
1207 MODULE_ALIAS_CRYPTO("aes");