arch/x86/crypto/aesni-intel_glue.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Support for Intel AES-NI instructions. This file contains glue
4  * code; the real AES implementation is in aesni-intel_asm.S.
5  *
6  * Copyright (C) 2008, Intel Corp.
7  *    Author: Huang Ying <ying.huang@intel.com>
8  *
9  * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
10  * interface for 64-bit kernels.
11  *    Authors: Adrian Hoban <adrian.hoban@intel.com>
12  *             Gabriele Paoloni <gabriele.paoloni@intel.com>
13  *             Tadeusz Struk (tadeusz.struk@intel.com)
14  *             Aidan O'Mahony (aidan.o.mahony@intel.com)
15  *    Copyright (c) 2010, Intel Corporation.
16  */
17
18 #include <linux/hardirq.h>
19 #include <linux/types.h>
20 #include <linux/module.h>
21 #include <linux/err.h>
22 #include <crypto/algapi.h>
23 #include <crypto/aes.h>
24 #include <crypto/ctr.h>
25 #include <crypto/b128ops.h>
26 #include <crypto/gcm.h>
27 #include <crypto/xts.h>
28 #include <asm/cpu_device_id.h>
29 #include <asm/simd.h>
30 #include <crypto/scatterwalk.h>
31 #include <crypto/internal/aead.h>
32 #include <crypto/internal/simd.h>
33 #include <crypto/internal/skcipher.h>
34 #include <linux/workqueue.h>
35 #include <linux/spinlock.h>
36 #ifdef CONFIG_X86_64
37 #include <asm/crypto/glue_helper.h>
38 #endif
39
40
41 #define AESNI_ALIGN     16
42 #define AESNI_ALIGN_ATTR __attribute__ ((__aligned__(AESNI_ALIGN)))
43 #define AES_BLOCK_MASK  (~(AES_BLOCK_SIZE - 1))
44 #define RFC4106_HASH_SUBKEY_SIZE 16
45 #define AESNI_ALIGN_EXTRA ((AESNI_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
46 #define CRYPTO_AES_CTX_SIZE (sizeof(struct crypto_aes_ctx) + AESNI_ALIGN_EXTRA)
47 #define XTS_AES_CTX_SIZE (sizeof(struct aesni_xts_ctx) + AESNI_ALIGN_EXTRA)
48
49 /* This data is stored at the end of the crypto_tfm struct.
50  * It is per-"session" (per-transform) data storage and
51  * needs to be 16-byte aligned.
52  */
53 struct aesni_rfc4106_gcm_ctx {
54         u8 hash_subkey[16] AESNI_ALIGN_ATTR;
55         struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
56         u8 nonce[4];
57 };
58
59 struct generic_gcmaes_ctx {
60         u8 hash_subkey[16] AESNI_ALIGN_ATTR;
61         struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
62 };
63
64 struct aesni_xts_ctx {
65         u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
66         u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
67 };
68
69 #define GCM_BLOCK_LEN 16
70
71 struct gcm_context_data {
72         /* init, update and finalize context data */
73         u8 aad_hash[GCM_BLOCK_LEN];
74         u64 aad_length;
75         u64 in_length;
76         u8 partial_block_enc_key[GCM_BLOCK_LEN];
77         u8 orig_IV[GCM_BLOCK_LEN];
78         u8 current_counter[GCM_BLOCK_LEN];
79         u64 partial_block_len;
80         u64 unused;
81         u8 hash_keys[GCM_BLOCK_LEN * 16];
82 };
83
84 asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
85                              unsigned int key_len);
86 asmlinkage void aesni_enc(const void *ctx, u8 *out, const u8 *in);
87 asmlinkage void aesni_dec(const void *ctx, u8 *out, const u8 *in);
88 asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
89                               const u8 *in, unsigned int len);
90 asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
91                               const u8 *in, unsigned int len);
92 asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
93                               const u8 *in, unsigned int len, u8 *iv);
94 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
95                               const u8 *in, unsigned int len, u8 *iv);
96
97 #define AVX_GEN2_OPTSIZE 640
98 #define AVX_GEN4_OPTSIZE 4096
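/*
 * These are the request sizes, in bytes, below which gcmaes_crypt_by_sg()
 * falls back to a simpler GCM implementation: for example, a 256-byte
 * request is processed by the SSE routines even when the AVX2 (gen4)
 * routines were selected at module init.
 */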
99
100 #ifdef CONFIG_X86_64
101
102 static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
103                               const u8 *in, unsigned int len, u8 *iv);
104 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
105                               const u8 *in, unsigned int len, u8 *iv);
106
107 asmlinkage void aesni_xts_crypt8(const struct crypto_aes_ctx *ctx, u8 *out,
108                                  const u8 *in, bool enc, le128 *iv);
109
110 /* asmlinkage void aesni_gcm_enc()
111  * void *ctx, AES key schedule. Starts on a 16-byte boundary.
112  * struct gcm_context_data *gdata, context data. May be uninitialized.
113  * u8 *out, Ciphertext output. Encrypt in-place is allowed.
114  * const u8 *in, Plaintext input
115  * unsigned long plaintext_len, Length of data in bytes for encryption.
116  * u8 *iv, Pre-counter block j0: 12 byte IV concatenated with 0x00000001.
117  *         16-byte aligned pointer.
118  * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
119  * const u8 *aad, Additional Authentication Data (AAD)
120  * unsigned long aad_len, Length of AAD in bytes.
121  * u8 *auth_tag, Authenticated Tag output.
122  * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
123  *          Valid values are 16 (most likely), 12 or 8.
124  */
125 asmlinkage void aesni_gcm_enc(void *ctx,
126                         struct gcm_context_data *gdata, u8 *out,
127                         const u8 *in, unsigned long plaintext_len, u8 *iv,
128                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
129                         u8 *auth_tag, unsigned long auth_tag_len);
130
131 /* asmlinkage void aesni_gcm_dec()
132  * void *ctx, AES key schedule. Starts on a 16-byte boundary.
133  * struct gcm_context_data *gdata, context data. May be uninitialized.
134  * u8 *out, Plaintext output. Decrypt in-place is allowed.
135  * const u8 *in, Ciphertext input
136  * unsigned long ciphertext_len, Length of data in bytes for decryption.
137  * u8 *iv, Pre-counter block j0: 12 byte IV concatenated with 0x00000001.
138  *         16-byte aligned pointer.
139  * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
140  * const u8 *aad, Additional Authentication Data (AAD)
141  * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
142  * to be 8 or 12 bytes
143  * u8 *auth_tag, Authenticated Tag output.
144  * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
145  * Valid values are 16 (most likely), 12 or 8.
146  */
147 asmlinkage void aesni_gcm_dec(void *ctx,
148                         struct gcm_context_data *gdata, u8 *out,
149                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
150                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
151                         u8 *auth_tag, unsigned long auth_tag_len);
152
153 /* Scatter / Gather routines, with args similar to above */
154 asmlinkage void aesni_gcm_init(void *ctx,
155                                struct gcm_context_data *gdata,
156                                u8 *iv,
157                                u8 *hash_subkey, const u8 *aad,
158                                unsigned long aad_len);
159 asmlinkage void aesni_gcm_enc_update(void *ctx,
160                                      struct gcm_context_data *gdata, u8 *out,
161                                      const u8 *in, unsigned long plaintext_len);
162 asmlinkage void aesni_gcm_dec_update(void *ctx,
163                                      struct gcm_context_data *gdata, u8 *out,
164                                      const u8 *in,
165                                      unsigned long ciphertext_len);
166 asmlinkage void aesni_gcm_finalize(void *ctx,
167                                    struct gcm_context_data *gdata,
168                                    u8 *auth_tag, unsigned long auth_tag_len);
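
/*
 * Sketch of how the init/update/finalize routines above are chained (this
 * mirrors gcmaes_crypt_by_sg() below; scatterlist mapping and error handling
 * are elided, and bytes_left/chunk_len are placeholders):
 *
 *	kernel_fpu_begin();
 *	aesni_gcm_init(aes_ctx, &data, iv, hash_subkey, assoc, assoclen);
 *	while (bytes_left)
 *		aesni_gcm_enc_update(aes_ctx, &data, dst, src, chunk_len);
 *	aesni_gcm_finalize(aes_ctx, &data, auth_tag, auth_tag_len);
 *	kernel_fpu_end();
 */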
169
170 static const struct aesni_gcm_tfm_s {
171         void (*init)(void *ctx, struct gcm_context_data *gdata, u8 *iv,
172                      u8 *hash_subkey, const u8 *aad, unsigned long aad_len);
173         void (*enc_update)(void *ctx, struct gcm_context_data *gdata, u8 *out,
174                            const u8 *in, unsigned long plaintext_len);
175         void (*dec_update)(void *ctx, struct gcm_context_data *gdata, u8 *out,
176                            const u8 *in, unsigned long ciphertext_len);
177         void (*finalize)(void *ctx, struct gcm_context_data *gdata,
178                          u8 *auth_tag, unsigned long auth_tag_len);
179 } *aesni_gcm_tfm;
180
181 static const struct aesni_gcm_tfm_s aesni_gcm_tfm_sse = {
182         .init = &aesni_gcm_init,
183         .enc_update = &aesni_gcm_enc_update,
184         .dec_update = &aesni_gcm_dec_update,
185         .finalize = &aesni_gcm_finalize,
186 };
187
188 asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
189                 void *keys, u8 *out, unsigned int num_bytes);
190 asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
191                 void *keys, u8 *out, unsigned int num_bytes);
192 asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
193                 void *keys, u8 *out, unsigned int num_bytes);
194 /*
195  * asmlinkage void aesni_gcm_init_avx_gen2()
196  * gcm_data *my_ctx_data, context data
197  * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
198  */
199 asmlinkage void aesni_gcm_init_avx_gen2(void *my_ctx_data,
200                                         struct gcm_context_data *gdata,
201                                         u8 *iv,
202                                         u8 *hash_subkey,
203                                         const u8 *aad,
204                                         unsigned long aad_len);
205
206 asmlinkage void aesni_gcm_enc_update_avx_gen2(void *ctx,
207                                      struct gcm_context_data *gdata, u8 *out,
208                                      const u8 *in, unsigned long plaintext_len);
209 asmlinkage void aesni_gcm_dec_update_avx_gen2(void *ctx,
210                                      struct gcm_context_data *gdata, u8 *out,
211                                      const u8 *in,
212                                      unsigned long ciphertext_len);
213 asmlinkage void aesni_gcm_finalize_avx_gen2(void *ctx,
214                                    struct gcm_context_data *gdata,
215                                    u8 *auth_tag, unsigned long auth_tag_len);
216
217 asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx,
218                                 struct gcm_context_data *gdata, u8 *out,
219                         const u8 *in, unsigned long plaintext_len, u8 *iv,
220                         const u8 *aad, unsigned long aad_len,
221                         u8 *auth_tag, unsigned long auth_tag_len);
222
223 asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx,
224                                 struct gcm_context_data *gdata, u8 *out,
225                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
226                         const u8 *aad, unsigned long aad_len,
227                         u8 *auth_tag, unsigned long auth_tag_len);
228
229 static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen2 = {
230         .init = &aesni_gcm_init_avx_gen2,
231         .enc_update = &aesni_gcm_enc_update_avx_gen2,
232         .dec_update = &aesni_gcm_dec_update_avx_gen2,
233         .finalize = &aesni_gcm_finalize_avx_gen2,
234 };
235
236 /*
237  * asmlinkage void aesni_gcm_init_avx_gen4()
238  * gcm_data *my_ctx_data, context data
239  * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
240  */
241 asmlinkage void aesni_gcm_init_avx_gen4(void *my_ctx_data,
242                                         struct gcm_context_data *gdata,
243                                         u8 *iv,
244                                         u8 *hash_subkey,
245                                         const u8 *aad,
246                                         unsigned long aad_len);
247
248 asmlinkage void aesni_gcm_enc_update_avx_gen4(void *ctx,
249                                      struct gcm_context_data *gdata, u8 *out,
250                                      const u8 *in, unsigned long plaintext_len);
251 asmlinkage void aesni_gcm_dec_update_avx_gen4(void *ctx,
252                                      struct gcm_context_data *gdata, u8 *out,
253                                      const u8 *in,
254                                      unsigned long ciphertext_len);
255 asmlinkage void aesni_gcm_finalize_avx_gen4(void *ctx,
256                                    struct gcm_context_data *gdata,
257                                    u8 *auth_tag, unsigned long auth_tag_len);
258
259 asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx,
260                                 struct gcm_context_data *gdata, u8 *out,
261                         const u8 *in, unsigned long plaintext_len, u8 *iv,
262                         const u8 *aad, unsigned long aad_len,
263                         u8 *auth_tag, unsigned long auth_tag_len);
264
265 asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx,
266                                 struct gcm_context_data *gdata, u8 *out,
267                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
268                         const u8 *aad, unsigned long aad_len,
269                         u8 *auth_tag, unsigned long auth_tag_len);
270
271 static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen4 = {
272         .init = &aesni_gcm_init_avx_gen4,
273         .enc_update = &aesni_gcm_enc_update_avx_gen4,
274         .dec_update = &aesni_gcm_dec_update_avx_gen4,
275         .finalize = &aesni_gcm_finalize_avx_gen4,
276 };
277
278 static inline struct
279 aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
280 {
281         unsigned long align = AESNI_ALIGN;
282
283         if (align <= crypto_tfm_ctx_alignment())
284                 align = 1;
285         return PTR_ALIGN(crypto_aead_ctx(tfm), align);
286 }
287
288 static inline struct
289 generic_gcmaes_ctx *generic_gcmaes_ctx_get(struct crypto_aead *tfm)
290 {
291         unsigned long align = AESNI_ALIGN;
292
293         if (align <= crypto_tfm_ctx_alignment())
294                 align = 1;
295         return PTR_ALIGN(crypto_aead_ctx(tfm), align);
296 }
297 #endif
298
299 static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
300 {
301         unsigned long addr = (unsigned long)raw_ctx;
302         unsigned long align = AESNI_ALIGN;
303
304         if (align <= crypto_tfm_ctx_alignment())
305                 align = 1;
306         return (struct crypto_aes_ctx *)ALIGN(addr, align);
307 }
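
/*
 * Worked example: CRYPTO_AES_CTX_SIZE reserves AESNI_ALIGN_EXTRA bytes on
 * top of sizeof(struct crypto_aes_ctx), so when the crypto API only
 * guarantees CRYPTO_MINALIGN (assume 8 here) and the raw context starts at
 * an address ending in 0x08, aes_ctx() rounds it up to the next 16-byte
 * boundary (ending in 0x10) and the reserved slack absorbs the shift.
 */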
308
309 static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
310                               const u8 *in_key, unsigned int key_len)
311 {
312         struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
313         int err;
314
315         if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
316             key_len != AES_KEYSIZE_256)
317                 return -EINVAL;
318
319         if (!crypto_simd_usable())
320                 err = aes_expandkey(ctx, in_key, key_len);
321         else {
322                 kernel_fpu_begin();
323                 err = aesni_set_key(ctx, in_key, key_len);
324                 kernel_fpu_end();
325         }
326
327         return err;
328 }
329
330 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
331                        unsigned int key_len)
332 {
333         return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
334 }
335
336 static void aesni_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
337 {
338         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
339
340         if (!crypto_simd_usable()) {
341                 aes_encrypt(ctx, dst, src);
342         } else {
343                 kernel_fpu_begin();
344                 aesni_enc(ctx, dst, src);
345                 kernel_fpu_end();
346         }
347 }
348
349 static void aesni_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
350 {
351         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
352
353         if (!crypto_simd_usable()) {
354                 aes_decrypt(ctx, dst, src);
355         } else {
356                 kernel_fpu_begin();
357                 aesni_dec(ctx, dst, src);
358                 kernel_fpu_end();
359         }
360 }
361
362 static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
363                                  unsigned int len)
364 {
365         return aes_set_key_common(crypto_skcipher_tfm(tfm),
366                                   crypto_skcipher_ctx(tfm), key, len);
367 }
368
369 static int ecb_encrypt(struct skcipher_request *req)
370 {
371         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
372         struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
373         struct skcipher_walk walk;
374         unsigned int nbytes;
375         int err;
376
377         err = skcipher_walk_virt(&walk, req, true);
378
379         kernel_fpu_begin();
380         while ((nbytes = walk.nbytes)) {
381                 aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
382                               nbytes & AES_BLOCK_MASK);
383                 nbytes &= AES_BLOCK_SIZE - 1;
384                 err = skcipher_walk_done(&walk, nbytes);
385         }
386         kernel_fpu_end();
387
388         return err;
389 }
390
391 static int ecb_decrypt(struct skcipher_request *req)
392 {
393         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
394         struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
395         struct skcipher_walk walk;
396         unsigned int nbytes;
397         int err;
398
399         err = skcipher_walk_virt(&walk, req, true);
400
401         kernel_fpu_begin();
402         while ((nbytes = walk.nbytes)) {
403                 aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
404                               nbytes & AES_BLOCK_MASK);
405                 nbytes &= AES_BLOCK_SIZE - 1;
406                 err = skcipher_walk_done(&walk, nbytes);
407         }
408         kernel_fpu_end();
409
410         return err;
411 }
412
413 static int cbc_encrypt(struct skcipher_request *req)
414 {
415         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
416         struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
417         struct skcipher_walk walk;
418         unsigned int nbytes;
419         int err;
420
421         err = skcipher_walk_virt(&walk, req, true);
422
423         kernel_fpu_begin();
424         while ((nbytes = walk.nbytes)) {
425                 aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
426                               nbytes & AES_BLOCK_MASK, walk.iv);
427                 nbytes &= AES_BLOCK_SIZE - 1;
428                 err = skcipher_walk_done(&walk, nbytes);
429         }
430         kernel_fpu_end();
431
432         return err;
433 }
434
435 static int cbc_decrypt(struct skcipher_request *req)
436 {
437         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
438         struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
439         struct skcipher_walk walk;
440         unsigned int nbytes;
441         int err;
442
443         err = skcipher_walk_virt(&walk, req, true);
444
445         kernel_fpu_begin();
446         while ((nbytes = walk.nbytes)) {
447                 aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
448                               nbytes & AES_BLOCK_MASK, walk.iv);
449                 nbytes &= AES_BLOCK_SIZE - 1;
450                 err = skcipher_walk_done(&walk, nbytes);
451         }
452         kernel_fpu_end();
453
454         return err;
455 }
456
457 #ifdef CONFIG_X86_64
458 static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
459                             struct skcipher_walk *walk)
460 {
461         u8 *ctrblk = walk->iv;
462         u8 keystream[AES_BLOCK_SIZE];
463         u8 *src = walk->src.virt.addr;
464         u8 *dst = walk->dst.virt.addr;
465         unsigned int nbytes = walk->nbytes;
466
467         aesni_enc(ctx, keystream, ctrblk);
468         crypto_xor_cpy(dst, keystream, src, nbytes);
469
470         crypto_inc(ctrblk, AES_BLOCK_SIZE);
471 }
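
/*
 * Example: for a 30-byte CTR request, ctr_crypt() below handles the first
 * 16 bytes with aesni_ctr_enc_tfm(), then ctr_crypt_final() encrypts the
 * counter block once and XORs only the remaining 14 keystream bytes into
 * the output, so no block padding is ever written.
 */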
472
473 static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
474                               const u8 *in, unsigned int len, u8 *iv)
475 {
476         /*
477          * Based on the key length, dispatch to the by8 version of
478          * CTR mode encryption/decryption for improved performance.
479          * aes_set_key_common() ensures that the key length is one of
480          * {128, 192, 256} bits.
481          */
482         if (ctx->key_length == AES_KEYSIZE_128)
483                 aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
484         else if (ctx->key_length == AES_KEYSIZE_192)
485                 aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
486         else
487                 aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
488 }
489
490 static int ctr_crypt(struct skcipher_request *req)
491 {
492         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
493         struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
494         struct skcipher_walk walk;
495         unsigned int nbytes;
496         int err;
497
498         err = skcipher_walk_virt(&walk, req, true);
499
500         kernel_fpu_begin();
501         while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
502                 aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
503                                       nbytes & AES_BLOCK_MASK, walk.iv);
504                 nbytes &= AES_BLOCK_SIZE - 1;
505                 err = skcipher_walk_done(&walk, nbytes);
506         }
507         if (walk.nbytes) {
508                 ctr_crypt_final(ctx, &walk);
509                 err = skcipher_walk_done(&walk, 0);
510         }
511         kernel_fpu_end();
512
513         return err;
514 }
515
516 static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
517                             unsigned int keylen)
518 {
519         struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
520         int err;
521
522         err = xts_verify_key(tfm, key, keylen);
523         if (err)
524                 return err;
525
526         keylen /= 2;
527
528         /* first half of xts-key is for crypt */
529         err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
530                                  key, keylen);
531         if (err)
532                 return err;
533
534         /* second half of xts-key is for tweak */
535         return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
536                                   key + keylen, keylen);
537 }
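
/*
 * Example: a 64-byte xts(aes) key passes xts_verify_key(), is split in
 * half, and each 32-byte half is expanded as an independent AES-256
 * schedule: the first half into raw_crypt_ctx (data encryption) and the
 * second half into raw_tweak_ctx (tweak encryption).
 */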
538
539
540 static void aesni_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
541 {
542         glue_xts_crypt_128bit_one(ctx, dst, src, iv, aesni_enc);
543 }
544
545 static void aesni_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
546 {
547         glue_xts_crypt_128bit_one(ctx, dst, src, iv, aesni_dec);
548 }
549
550 static void aesni_xts_enc8(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
551 {
552         aesni_xts_crypt8(ctx, dst, src, true, iv);
553 }
554
555 static void aesni_xts_dec8(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
556 {
557         aesni_xts_crypt8(ctx, dst, src, false, iv);
558 }
559
560 static const struct common_glue_ctx aesni_enc_xts = {
561         .num_funcs = 2,
562         .fpu_blocks_limit = 1,
563
564         .funcs = { {
565                 .num_blocks = 8,
566                 .fn_u = { .xts = aesni_xts_enc8 }
567         }, {
568                 .num_blocks = 1,
569                 .fn_u = { .xts = aesni_xts_enc }
570         } }
571 };
572
573 static const struct common_glue_ctx aesni_dec_xts = {
574         .num_funcs = 2,
575         .fpu_blocks_limit = 1,
576
577         .funcs = { {
578                 .num_blocks = 8,
579                 .fn_u = { .xts = aesni_xts_dec8 }
580         }, {
581                 .num_blocks = 1,
582                 .fn_u = { .xts = aesni_xts_dec }
583         } }
584 };
585
586 static int xts_encrypt(struct skcipher_request *req)
587 {
588         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
589         struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
590
591         return glue_xts_req_128bit(&aesni_enc_xts, req, aesni_enc,
592                                    aes_ctx(ctx->raw_tweak_ctx),
593                                    aes_ctx(ctx->raw_crypt_ctx),
594                                    false);
595 }
596
597 static int xts_decrypt(struct skcipher_request *req)
598 {
599         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
600         struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
601
602         return glue_xts_req_128bit(&aesni_dec_xts, req, aesni_enc,
603                                    aes_ctx(ctx->raw_tweak_ctx),
604                                    aes_ctx(ctx->raw_crypt_ctx),
605                                    true);
606 }
607
608 static int
609 rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
610 {
611         struct crypto_aes_ctx ctx;
612         int ret;
613
614         ret = aes_expandkey(&ctx, key, key_len);
615         if (ret)
616                 return ret;
617
618         /* Clear the hash subkey container; the hash subkey is */
619         /* created by encrypting an all-zero block. */
620         memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
621
622         aes_encrypt(&ctx, hash_subkey, hash_subkey);
623
624         memzero_explicit(&ctx, sizeof(ctx));
625         return 0;
626 }
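
/*
 * In GCM terms, the value computed above is the hash subkey
 * H = AES-ENC_K(0^128): the all-zero block encrypted under the raw key,
 * which the assembly routines use for the GHASH authentication step.
 */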
627
628 static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
629                                   unsigned int key_len)
630 {
631         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);
632
633         if (key_len < 4)
634                 return -EINVAL;
635
636         /* Account for the 4-byte nonce at the end. */
637         key_len -= 4;
638
639         memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
640
641         return aes_set_key_common(crypto_aead_tfm(aead),
642                                   &ctx->aes_key_expanded, key, key_len) ?:
643                rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
644 }
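
/*
 * RFC 4106 key layout handled above: the last 4 bytes of the supplied key
 * are the salt, stored in ctx->nonce and later used as the first 4 bytes of
 * each per-request IV; the remaining leading bytes are the actual AES key,
 * expanded into aes_key_expanded and also used to derive the hash subkey.
 */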
645
646 /* This is the Integrity Check Value (aka the authentication tag) length and can
647  * be 8, 12 or 16 bytes long. */
648 static int common_rfc4106_set_authsize(struct crypto_aead *aead,
649                                        unsigned int authsize)
650 {
651         switch (authsize) {
652         case 8:
653         case 12:
654         case 16:
655                 break;
656         default:
657                 return -EINVAL;
658         }
659
660         return 0;
661 }
662
663 static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
664                                        unsigned int authsize)
665 {
666         switch (authsize) {
667         case 4:
668         case 8:
669         case 12:
670         case 13:
671         case 14:
672         case 15:
673         case 16:
674                 break;
675         default:
676                 return -EINVAL;
677         }
678
679         return 0;
680 }
681
682 static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
683                               unsigned int assoclen, u8 *hash_subkey,
684                               u8 *iv, void *aes_ctx)
685 {
686         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
687         unsigned long auth_tag_len = crypto_aead_authsize(tfm);
688         const struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm;
689         struct gcm_context_data data AESNI_ALIGN_ATTR;
690         struct scatter_walk dst_sg_walk = {};
691         unsigned long left = req->cryptlen;
692         unsigned long len, srclen, dstlen;
693         struct scatter_walk assoc_sg_walk;
694         struct scatter_walk src_sg_walk;
695         struct scatterlist src_start[2];
696         struct scatterlist dst_start[2];
697         struct scatterlist *src_sg;
698         struct scatterlist *dst_sg;
699         u8 *src, *dst, *assoc;
700         u8 *assocmem = NULL;
701         u8 authTag[16];
702
703         if (!enc)
704                 left -= auth_tag_len;
705
706         if (left < AVX_GEN4_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen4)
707                 gcm_tfm = &aesni_gcm_tfm_avx_gen2;
708         if (left < AVX_GEN2_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen2)
709                 gcm_tfm = &aesni_gcm_tfm_sse;
710
711         /* Linearize assoc, if not already linear */
712         if (req->src->length >= assoclen && req->src->length &&
713                 (!PageHighMem(sg_page(req->src)) ||
714                         req->src->offset + req->src->length <= PAGE_SIZE)) {
715                 scatterwalk_start(&assoc_sg_walk, req->src);
716                 assoc = scatterwalk_map(&assoc_sg_walk);
717         } else {
718                 /* assoc can be any length, so must be on heap */
719                 assocmem = kmalloc(assoclen, GFP_ATOMIC);
720                 if (unlikely(!assocmem))
721                         return -ENOMEM;
722                 assoc = assocmem;
723
724                 scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
725         }
726
727         if (left) {
728                 src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
729                 scatterwalk_start(&src_sg_walk, src_sg);
730                 if (req->src != req->dst) {
731                         dst_sg = scatterwalk_ffwd(dst_start, req->dst,
732                                                   req->assoclen);
733                         scatterwalk_start(&dst_sg_walk, dst_sg);
734                 }
735         }
736
737         kernel_fpu_begin();
738         gcm_tfm->init(aes_ctx, &data, iv,
739                 hash_subkey, assoc, assoclen);
740         if (req->src != req->dst) {
741                 while (left) {
742                         src = scatterwalk_map(&src_sg_walk);
743                         dst = scatterwalk_map(&dst_sg_walk);
744                         srclen = scatterwalk_clamp(&src_sg_walk, left);
745                         dstlen = scatterwalk_clamp(&dst_sg_walk, left);
746                         len = min(srclen, dstlen);
747                         if (len) {
748                                 if (enc)
749                                         gcm_tfm->enc_update(aes_ctx, &data,
750                                                              dst, src, len);
751                                 else
752                                         gcm_tfm->dec_update(aes_ctx, &data,
753                                                              dst, src, len);
754                         }
755                         left -= len;
756
757                         scatterwalk_unmap(src);
758                         scatterwalk_unmap(dst);
759                         scatterwalk_advance(&src_sg_walk, len);
760                         scatterwalk_advance(&dst_sg_walk, len);
761                         scatterwalk_done(&src_sg_walk, 0, left);
762                         scatterwalk_done(&dst_sg_walk, 1, left);
763                 }
764         } else {
765                 while (left) {
766                         dst = src = scatterwalk_map(&src_sg_walk);
767                         len = scatterwalk_clamp(&src_sg_walk, left);
768                         if (len) {
769                                 if (enc)
770                                         gcm_tfm->enc_update(aes_ctx, &data,
771                                                              src, src, len);
772                                 else
773                                         gcm_tfm->dec_update(aes_ctx, &data,
774                                                              src, src, len);
775                         }
776                         left -= len;
777                         scatterwalk_unmap(src);
778                         scatterwalk_advance(&src_sg_walk, len);
779                         scatterwalk_done(&src_sg_walk, 1, left);
780                 }
781         }
782         gcm_tfm->finalize(aes_ctx, &data, authTag, auth_tag_len);
783         kernel_fpu_end();
784
785         if (!assocmem)
786                 scatterwalk_unmap(assoc);
787         else
788                 kfree(assocmem);
789
790         if (!enc) {
791                 u8 authTagMsg[16];
792
793                 /* Copy out original authTag */
794                 scatterwalk_map_and_copy(authTagMsg, req->src,
795                                          req->assoclen + req->cryptlen -
796                                          auth_tag_len,
797                                          auth_tag_len, 0);
798
799                 /* Compare generated tag with passed in tag. */
800                 return crypto_memneq(authTagMsg, authTag, auth_tag_len) ?
801                         -EBADMSG : 0;
802         }
803
804         /* Copy in the authTag */
805         scatterwalk_map_and_copy(authTag, req->dst,
806                                  req->assoclen + req->cryptlen,
807                                  auth_tag_len, 1);
808
809         return 0;
810 }
811
812 static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
813                           u8 *hash_subkey, u8 *iv, void *aes_ctx)
814 {
815         return gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv,
816                                 aes_ctx);
817 }
818
819 static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
820                           u8 *hash_subkey, u8 *iv, void *aes_ctx)
821 {
822         return gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv,
823                                 aes_ctx);
824 }
825
826 static int helper_rfc4106_encrypt(struct aead_request *req)
827 {
828         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
829         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
830         void *aes_ctx = &(ctx->aes_key_expanded);
831         u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
832         unsigned int i;
833         __be32 counter = cpu_to_be32(1);
834
835         /* Assuming we are supporting rfc4106 64-bit extended */
836         /* sequence numbers, the AAD length must be either */
837         /* 16 or 20 bytes. */
838         if (unlikely(req->assoclen != 16 && req->assoclen != 20))
839                 return -EINVAL;
840
841         /* Build the IV: 4-byte nonce, 8-byte explicit IV, 32-bit counter = 1. */
842         for (i = 0; i < 4; i++)
843                 *(iv+i) = ctx->nonce[i];
844         for (i = 0; i < 8; i++)
845                 *(iv+4+i) = req->iv[i];
846         *((__be32 *)(iv+12)) = counter;
847
848         return gcmaes_encrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
849                               aes_ctx);
850 }
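
/*
 * Layout of the 16-byte IV built above (the j0 pre-counter block expected
 * by the GCM routines):
 *
 *	iv[0..3]   = ctx->nonce   (salt taken from the RFC 4106 key)
 *	iv[4..11]  = req->iv      (8-byte explicit IV from the request)
 *	iv[12..15] = 0x00000001   (initial block counter, big endian)
 */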
851
852 static int helper_rfc4106_decrypt(struct aead_request *req)
853 {
854         __be32 counter = cpu_to_be32(1);
855         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
856         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
857         void *aes_ctx = &(ctx->aes_key_expanded);
858         u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
859         unsigned int i;
860
861         if (unlikely(req->assoclen != 16 && req->assoclen != 20))
862                 return -EINVAL;
863
864         /* Assuming we are supporting rfc4106 64-bit extended */
865         /* sequence numbers, the AAD length must be either */
866         /* 16 or 20 bytes (checked above). */
867
868         /* Build the IV: 4-byte nonce, 8-byte explicit IV, 32-bit counter = 1. */
869         for (i = 0; i < 4; i++)
870                 *(iv+i) = ctx->nonce[i];
871         for (i = 0; i < 8; i++)
872                 *(iv+4+i) = req->iv[i];
873         *((__be32 *)(iv+12)) = counter;
874
875         return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
876                               aes_ctx);
877 }
878 #endif
879
880 static struct crypto_alg aesni_cipher_alg = {
881         .cra_name               = "aes",
882         .cra_driver_name        = "aes-aesni",
883         .cra_priority           = 300,
884         .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
885         .cra_blocksize          = AES_BLOCK_SIZE,
886         .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
887         .cra_module             = THIS_MODULE,
888         .cra_u  = {
889                 .cipher = {
890                         .cia_min_keysize        = AES_MIN_KEY_SIZE,
891                         .cia_max_keysize        = AES_MAX_KEY_SIZE,
892                         .cia_setkey             = aes_set_key,
893                         .cia_encrypt            = aesni_encrypt,
894                         .cia_decrypt            = aesni_decrypt
895                 }
896         }
897 };
898
899 static struct skcipher_alg aesni_skciphers[] = {
900         {
901                 .base = {
902                         .cra_name               = "__ecb(aes)",
903                         .cra_driver_name        = "__ecb-aes-aesni",
904                         .cra_priority           = 400,
905                         .cra_flags              = CRYPTO_ALG_INTERNAL,
906                         .cra_blocksize          = AES_BLOCK_SIZE,
907                         .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
908                         .cra_module             = THIS_MODULE,
909                 },
910                 .min_keysize    = AES_MIN_KEY_SIZE,
911                 .max_keysize    = AES_MAX_KEY_SIZE,
912                 .setkey         = aesni_skcipher_setkey,
913                 .encrypt        = ecb_encrypt,
914                 .decrypt        = ecb_decrypt,
915         }, {
916                 .base = {
917                         .cra_name               = "__cbc(aes)",
918                         .cra_driver_name        = "__cbc-aes-aesni",
919                         .cra_priority           = 400,
920                         .cra_flags              = CRYPTO_ALG_INTERNAL,
921                         .cra_blocksize          = AES_BLOCK_SIZE,
922                         .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
923                         .cra_module             = THIS_MODULE,
924                 },
925                 .min_keysize    = AES_MIN_KEY_SIZE,
926                 .max_keysize    = AES_MAX_KEY_SIZE,
927                 .ivsize         = AES_BLOCK_SIZE,
928                 .setkey         = aesni_skcipher_setkey,
929                 .encrypt        = cbc_encrypt,
930                 .decrypt        = cbc_decrypt,
931 #ifdef CONFIG_X86_64
932         }, {
933                 .base = {
934                         .cra_name               = "__ctr(aes)",
935                         .cra_driver_name        = "__ctr-aes-aesni",
936                         .cra_priority           = 400,
937                         .cra_flags              = CRYPTO_ALG_INTERNAL,
938                         .cra_blocksize          = 1,
939                         .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
940                         .cra_module             = THIS_MODULE,
941                 },
942                 .min_keysize    = AES_MIN_KEY_SIZE,
943                 .max_keysize    = AES_MAX_KEY_SIZE,
944                 .ivsize         = AES_BLOCK_SIZE,
945                 .chunksize      = AES_BLOCK_SIZE,
946                 .setkey         = aesni_skcipher_setkey,
947                 .encrypt        = ctr_crypt,
948                 .decrypt        = ctr_crypt,
949         }, {
950                 .base = {
951                         .cra_name               = "__xts(aes)",
952                         .cra_driver_name        = "__xts-aes-aesni",
953                         .cra_priority           = 401,
954                         .cra_flags              = CRYPTO_ALG_INTERNAL,
955                         .cra_blocksize          = AES_BLOCK_SIZE,
956                         .cra_ctxsize            = XTS_AES_CTX_SIZE,
957                         .cra_module             = THIS_MODULE,
958                 },
959                 .min_keysize    = 2 * AES_MIN_KEY_SIZE,
960                 .max_keysize    = 2 * AES_MAX_KEY_SIZE,
961                 .ivsize         = AES_BLOCK_SIZE,
962                 .setkey         = xts_aesni_setkey,
963                 .encrypt        = xts_encrypt,
964                 .decrypt        = xts_decrypt,
965 #endif
966         }
967 };
968
969 static
970 struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];
971
972 #ifdef CONFIG_X86_64
973 static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key,
974                                   unsigned int key_len)
975 {
976         struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(aead);
977
978         return aes_set_key_common(crypto_aead_tfm(aead),
979                                   &ctx->aes_key_expanded, key, key_len) ?:
980                rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
981 }
982
983 static int generic_gcmaes_encrypt(struct aead_request *req)
984 {
985         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
986         struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
987         void *aes_ctx = &(ctx->aes_key_expanded);
988         u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
989         __be32 counter = cpu_to_be32(1);
990
991         memcpy(iv, req->iv, 12);
992         *((__be32 *)(iv+12)) = counter;
993
994         return gcmaes_encrypt(req, req->assoclen, ctx->hash_subkey, iv,
995                               aes_ctx);
996 }
997
998 static int generic_gcmaes_decrypt(struct aead_request *req)
999 {
1000         __be32 counter = cpu_to_be32(1);
1001         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1002         struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
1003         void *aes_ctx = &(ctx->aes_key_expanded);
1004         u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
1005
1006         memcpy(iv, req->iv, 12);
1007         *((__be32 *)(iv+12)) = counter;
1008
1009         return gcmaes_decrypt(req, req->assoclen, ctx->hash_subkey, iv,
1010                               aes_ctx);
1011 }
1012
1013 static struct aead_alg aesni_aeads[] = { {
1014         .setkey                 = common_rfc4106_set_key,
1015         .setauthsize            = common_rfc4106_set_authsize,
1016         .encrypt                = helper_rfc4106_encrypt,
1017         .decrypt                = helper_rfc4106_decrypt,
1018         .ivsize                 = GCM_RFC4106_IV_SIZE,
1019         .maxauthsize            = 16,
1020         .base = {
1021                 .cra_name               = "__rfc4106(gcm(aes))",
1022                 .cra_driver_name        = "__rfc4106-gcm-aesni",
1023                 .cra_priority           = 400,
1024                 .cra_flags              = CRYPTO_ALG_INTERNAL,
1025                 .cra_blocksize          = 1,
1026                 .cra_ctxsize            = sizeof(struct aesni_rfc4106_gcm_ctx),
1027                 .cra_alignmask          = AESNI_ALIGN - 1,
1028                 .cra_module             = THIS_MODULE,
1029         },
1030 }, {
1031         .setkey                 = generic_gcmaes_set_key,
1032         .setauthsize            = generic_gcmaes_set_authsize,
1033         .encrypt                = generic_gcmaes_encrypt,
1034         .decrypt                = generic_gcmaes_decrypt,
1035         .ivsize                 = GCM_AES_IV_SIZE,
1036         .maxauthsize            = 16,
1037         .base = {
1038                 .cra_name               = "__gcm(aes)",
1039                 .cra_driver_name        = "__generic-gcm-aesni",
1040                 .cra_priority           = 400,
1041                 .cra_flags              = CRYPTO_ALG_INTERNAL,
1042                 .cra_blocksize          = 1,
1043                 .cra_ctxsize            = sizeof(struct generic_gcmaes_ctx),
1044                 .cra_alignmask          = AESNI_ALIGN - 1,
1045                 .cra_module             = THIS_MODULE,
1046         },
1047 } };
1048 #else
1049 static struct aead_alg aesni_aeads[0];
1050 #endif
1051
1052 static struct simd_aead_alg *aesni_simd_aeads[ARRAY_SIZE(aesni_aeads)];
1053
1054 static const struct x86_cpu_id aesni_cpu_id[] = {
1055         X86_MATCH_FEATURE(X86_FEATURE_AES, NULL),
1056         {}
1057 };
1058 MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
1059
1060 static int __init aesni_init(void)
1061 {
1062         int err;
1063
1064         if (!x86_match_cpu(aesni_cpu_id))
1065                 return -ENODEV;
1066 #ifdef CONFIG_X86_64
1067         if (boot_cpu_has(X86_FEATURE_AVX2)) {
1068                 pr_info("AVX2 version of gcm_enc/dec engaged.\n");
1069                 aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen4;
1070         } else if (boot_cpu_has(X86_FEATURE_AVX)) {
1072                 pr_info("AVX version of gcm_enc/dec engaged.\n");
1073                 aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen2;
1074         } else {
1075                 pr_info("SSE version of gcm_enc/dec engaged.\n");
1076                 aesni_gcm_tfm = &aesni_gcm_tfm_sse;
1077         }
1078         aesni_ctr_enc_tfm = aesni_ctr_enc;
1079         if (boot_cpu_has(X86_FEATURE_AVX)) {
1080                 /* optimize performance of ctr mode encryption transform */
1081                 aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
1082                 pr_info("AES CTR mode by8 optimization enabled\n");
1083         }
1084 #endif
1085
1086         err = crypto_register_alg(&aesni_cipher_alg);
1087         if (err)
1088                 return err;
1089
1090         err = simd_register_skciphers_compat(aesni_skciphers,
1091                                              ARRAY_SIZE(aesni_skciphers),
1092                                              aesni_simd_skciphers);
1093         if (err)
1094                 goto unregister_cipher;
1095
1096         err = simd_register_aeads_compat(aesni_aeads, ARRAY_SIZE(aesni_aeads),
1097                                          aesni_simd_aeads);
1098         if (err)
1099                 goto unregister_skciphers;
1100
1101         return 0;
1102
1103 unregister_skciphers:
1104         simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
1105                                   aesni_simd_skciphers);
1106 unregister_cipher:
1107         crypto_unregister_alg(&aesni_cipher_alg);
1108         return err;
1109 }
1110
1111 static void __exit aesni_exit(void)
1112 {
1113         simd_unregister_aeads(aesni_aeads, ARRAY_SIZE(aesni_aeads),
1114                               aesni_simd_aeads);
1115         simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
1116                                   aesni_simd_skciphers);
1117         crypto_unregister_alg(&aesni_cipher_alg);
1118 }
1119
1120 late_initcall(aesni_init);
1121 module_exit(aesni_exit);
1122
1123 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, optimized with Intel AES-NI instructions");
1124 MODULE_LICENSE("GPL");
1125 MODULE_ALIAS_CRYPTO("aes");