// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code; the real AES implementation is in aesni-intel_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 */

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/b128ops.h>
#include <crypto/gcm.h>
#include <crypto/xts.h>
#include <asm/cpu_device_id.h>
#include <asm/simd.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>


#define AESNI_ALIGN     16
#define AESNI_ALIGN_ATTR __attribute__ ((__aligned__(AESNI_ALIGN)))
#define AES_BLOCK_MASK  (~(AES_BLOCK_SIZE - 1))
#define RFC4106_HASH_SUBKEY_SIZE 16
#define AESNI_ALIGN_EXTRA ((AESNI_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
#define CRYPTO_AES_CTX_SIZE (sizeof(struct crypto_aes_ctx) + AESNI_ALIGN_EXTRA)
#define XTS_AES_CTX_SIZE (sizeof(struct aesni_xts_ctx) + AESNI_ALIGN_EXTRA)
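
/*
 * The expanded key must be 16-byte aligned for the aligned SSE loads in the
 * assembly, but the crypto API only guarantees CRYPTO_MINALIGN for the tfm
 * context. AESNI_ALIGN_EXTRA reserves enough slack so that aes_ctx() below
 * can round the context pointer up to a 16-byte boundary.
 */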

/*
 * This data is stored at the end of the crypto_tfm struct.
 * It is per-"session" data and must be 16-byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
        u8 hash_subkey[16] AESNI_ALIGN_ATTR;
        struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
        u8 nonce[4];
};

struct generic_gcmaes_ctx {
        u8 hash_subkey[16] AESNI_ALIGN_ATTR;
        struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
};

struct aesni_xts_ctx {
        u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
        u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
};

#define GCM_BLOCK_LEN 16

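/*
 * Per-request GCM state shared with the assembly routines; the field layout
 * must match what the asm code expects. hash_keys leaves room for the
 * precomputed hash-key values used by the multi-block GHASH paths.
 */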
struct gcm_context_data {
        /* init, update and finalize context data */
        u8 aad_hash[GCM_BLOCK_LEN];
        u64 aad_length;
        u64 in_length;
        u8 partial_block_enc_key[GCM_BLOCK_LEN];
        u8 orig_IV[GCM_BLOCK_LEN];
        u8 current_counter[GCM_BLOCK_LEN];
        u64 partial_block_len;
        u64 unused;
        u8 hash_keys[GCM_BLOCK_LEN * 16];
};

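/*
 * All of the asmlinkage routines below use SSE/AVX register state, so every
 * call site must sit inside a kernel_fpu_begin()/kernel_fpu_end() section.
 */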
asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
                             unsigned int key_len);
asmlinkage void aesni_enc(const void *ctx, u8 *out, const u8 *in);
asmlinkage void aesni_dec(const void *ctx, u8 *out, const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cts_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
                                  const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cts_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
                                  const u8 *in, unsigned int len, u8 *iv);

#define AVX_GEN2_OPTSIZE 640
#define AVX_GEN4_OPTSIZE 4096

asmlinkage void aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *out,
                                  const u8 *in, unsigned int len, u8 *iv);

asmlinkage void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *out,
                                  const u8 *in, unsigned int len, u8 *iv);
#ifdef CONFIG_X86_64

static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);

/* Scatter / Gather routines, with args similar to above */
asmlinkage void aesni_gcm_init(void *ctx,
                               struct gcm_context_data *gdata,
                               u8 *iv,
                               u8 *hash_subkey, const u8 *aad,
                               unsigned long aad_len);
asmlinkage void aesni_gcm_enc_update(void *ctx,
                                     struct gcm_context_data *gdata, u8 *out,
                                     const u8 *in, unsigned long plaintext_len);
asmlinkage void aesni_gcm_dec_update(void *ctx,
                                     struct gcm_context_data *gdata, u8 *out,
                                     const u8 *in,
                                     unsigned long ciphertext_len);
asmlinkage void aesni_gcm_finalize(void *ctx,
                                   struct gcm_context_data *gdata,
                                   u8 *auth_tag, unsigned long auth_tag_len);

static const struct aesni_gcm_tfm_s {
        void (*init)(void *ctx, struct gcm_context_data *gdata, u8 *iv,
                     u8 *hash_subkey, const u8 *aad, unsigned long aad_len);
        void (*enc_update)(void *ctx, struct gcm_context_data *gdata, u8 *out,
                           const u8 *in, unsigned long plaintext_len);
        void (*dec_update)(void *ctx, struct gcm_context_data *gdata, u8 *out,
                           const u8 *in, unsigned long ciphertext_len);
        void (*finalize)(void *ctx, struct gcm_context_data *gdata,
                         u8 *auth_tag, unsigned long auth_tag_len);
} *aesni_gcm_tfm;

static const struct aesni_gcm_tfm_s aesni_gcm_tfm_sse = {
        .init = &aesni_gcm_init,
        .enc_update = &aesni_gcm_enc_update,
        .dec_update = &aesni_gcm_dec_update,
        .finalize = &aesni_gcm_finalize,
};

asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
                void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
                void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
                void *keys, u8 *out, unsigned int num_bytes);
/*
 * asmlinkage void aesni_gcm_init_avx_gen2()
 * gcm_data *my_ctx_data - context data
 * u8 *hash_subkey - the hash subkey input; data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_init_avx_gen2(void *my_ctx_data,
                                        struct gcm_context_data *gdata,
                                        u8 *iv,
                                        u8 *hash_subkey,
                                        const u8 *aad,
                                        unsigned long aad_len);

asmlinkage void aesni_gcm_enc_update_avx_gen2(void *ctx,
                                     struct gcm_context_data *gdata, u8 *out,
                                     const u8 *in, unsigned long plaintext_len);
asmlinkage void aesni_gcm_dec_update_avx_gen2(void *ctx,
                                     struct gcm_context_data *gdata, u8 *out,
                                     const u8 *in,
                                     unsigned long ciphertext_len);
asmlinkage void aesni_gcm_finalize_avx_gen2(void *ctx,
                                   struct gcm_context_data *gdata,
                                   u8 *auth_tag, unsigned long auth_tag_len);

static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen2 = {
        .init = &aesni_gcm_init_avx_gen2,
        .enc_update = &aesni_gcm_enc_update_avx_gen2,
        .dec_update = &aesni_gcm_dec_update_avx_gen2,
        .finalize = &aesni_gcm_finalize_avx_gen2,
};

/*
 * asmlinkage void aesni_gcm_init_avx_gen4()
 * gcm_data *my_ctx_data - context data
 * u8 *hash_subkey - the hash subkey input; data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_init_avx_gen4(void *my_ctx_data,
                                        struct gcm_context_data *gdata,
                                        u8 *iv,
                                        u8 *hash_subkey,
                                        const u8 *aad,
                                        unsigned long aad_len);

asmlinkage void aesni_gcm_enc_update_avx_gen4(void *ctx,
                                     struct gcm_context_data *gdata, u8 *out,
                                     const u8 *in, unsigned long plaintext_len);
asmlinkage void aesni_gcm_dec_update_avx_gen4(void *ctx,
                                     struct gcm_context_data *gdata, u8 *out,
                                     const u8 *in,
                                     unsigned long ciphertext_len);
asmlinkage void aesni_gcm_finalize_avx_gen4(void *ctx,
                                   struct gcm_context_data *gdata,
                                   u8 *auth_tag, unsigned long auth_tag_len);

static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen4 = {
        .init = &aesni_gcm_init_avx_gen4,
        .enc_update = &aesni_gcm_enc_update_avx_gen4,
        .dec_update = &aesni_gcm_dec_update_avx_gen4,
        .finalize = &aesni_gcm_finalize_avx_gen4,
};

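/*
 * The context getters below re-align the crypto API's context pointer by
 * hand: if the API already guarantees at least AESNI_ALIGN alignment, the
 * pointer is used as-is (align == 1); otherwise it is rounded up.
 */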
static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
        unsigned long align = AESNI_ALIGN;

        if (align <= crypto_tfm_ctx_alignment())
                align = 1;
        return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}

static inline struct
generic_gcmaes_ctx *generic_gcmaes_ctx_get(struct crypto_aead *tfm)
{
        unsigned long align = AESNI_ALIGN;

        if (align <= crypto_tfm_ctx_alignment())
                align = 1;
        return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}
#endif

static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
        unsigned long addr = (unsigned long)raw_ctx;
        unsigned long align = AESNI_ALIGN;

        if (align <= crypto_tfm_ctx_alignment())
                align = 1;
        return (struct crypto_aes_ctx *)ALIGN(addr, align);
}

static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
                              const u8 *in_key, unsigned int key_len)
{
        struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
        int err;

        if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
            key_len != AES_KEYSIZE_256)
                return -EINVAL;

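        /*
         * The AES-NI key schedule routine uses XMM state (it is built
         * around AESKEYGENASSIST), so it needs the FPU; fall back to the
         * generic C key expansion when SIMD is not usable.
         */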
        if (!crypto_simd_usable())
                err = aes_expandkey(ctx, in_key, key_len);
        else {
                kernel_fpu_begin();
                err = aesni_set_key(ctx, in_key, key_len);
                kernel_fpu_end();
        }

        return err;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                       unsigned int key_len)
{
        return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}

static void aesni_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        if (!crypto_simd_usable()) {
                aes_encrypt(ctx, dst, src);
        } else {
                kernel_fpu_begin();
                aesni_enc(ctx, dst, src);
                kernel_fpu_end();
        }
}

static void aesni_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        if (!crypto_simd_usable()) {
                aes_decrypt(ctx, dst, src);
        } else {
                kernel_fpu_begin();
                aesni_dec(ctx, dst, src);
                kernel_fpu_end();
        }
}

static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
                                 unsigned int len)
{
        return aes_set_key_common(crypto_skcipher_tfm(tfm),
                                  crypto_skcipher_ctx(tfm), key, len);
}

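/*
 * The ECB/CBC helpers below run the skcipher walk in atomic mode (the
 * 'true' argument to skcipher_walk_virt()), so skcipher_walk_done() will
 * not sleep while the kernel_fpu_begin() section is held open across
 * iterations.
 */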
static int ecb_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, true);

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = skcipher_walk_done(&walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

static int ecb_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, true);

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = skcipher_walk_done(&walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

static int cbc_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, true);

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = skcipher_walk_done(&walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

static int cbc_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, true);

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = skcipher_walk_done(&walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

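/*
 * CBC with ciphertext stealing: all but the final two blocks are handled
 * as plain CBC via a subrequest; the last full block and the partial tail
 * are then processed by the dedicated aesni_cts_cbc_enc()/_dec() routines.
 */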
static int cts_cbc_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
        int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
        struct scatterlist *src = req->src, *dst = req->dst;
        struct scatterlist sg_src[2], sg_dst[2];
        struct skcipher_request subreq;
        struct skcipher_walk walk;
        int err;

        skcipher_request_set_tfm(&subreq, tfm);
        skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
                                      NULL, NULL);

        if (req->cryptlen <= AES_BLOCK_SIZE) {
                if (req->cryptlen < AES_BLOCK_SIZE)
                        return -EINVAL;
                cbc_blocks = 1;
        }

        if (cbc_blocks > 0) {
                skcipher_request_set_crypt(&subreq, req->src, req->dst,
                                           cbc_blocks * AES_BLOCK_SIZE,
                                           req->iv);

                err = cbc_encrypt(&subreq);
                if (err)
                        return err;

                if (req->cryptlen == AES_BLOCK_SIZE)
                        return 0;

                dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
                if (req->dst != req->src)
                        dst = scatterwalk_ffwd(sg_dst, req->dst,
                                               subreq.cryptlen);
        }

        /* handle ciphertext stealing */
        skcipher_request_set_crypt(&subreq, src, dst,
                                   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
                                   req->iv);

        err = skcipher_walk_virt(&walk, &subreq, false);
        if (err)
                return err;

        kernel_fpu_begin();
        aesni_cts_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                          walk.nbytes, walk.iv);
        kernel_fpu_end();

        return skcipher_walk_done(&walk, 0);
}

static int cts_cbc_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
        int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
        struct scatterlist *src = req->src, *dst = req->dst;
        struct scatterlist sg_src[2], sg_dst[2];
        struct skcipher_request subreq;
        struct skcipher_walk walk;
        int err;

        skcipher_request_set_tfm(&subreq, tfm);
        skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
                                      NULL, NULL);

        if (req->cryptlen <= AES_BLOCK_SIZE) {
                if (req->cryptlen < AES_BLOCK_SIZE)
                        return -EINVAL;
                cbc_blocks = 1;
        }

        if (cbc_blocks > 0) {
                skcipher_request_set_crypt(&subreq, req->src, req->dst,
                                           cbc_blocks * AES_BLOCK_SIZE,
                                           req->iv);

                err = cbc_decrypt(&subreq);
                if (err)
                        return err;

                if (req->cryptlen == AES_BLOCK_SIZE)
                        return 0;

                dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
                if (req->dst != req->src)
                        dst = scatterwalk_ffwd(sg_dst, req->dst,
                                               subreq.cryptlen);
        }

        /* handle ciphertext stealing */
        skcipher_request_set_crypt(&subreq, src, dst,
                                   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
                                   req->iv);

        err = skcipher_walk_virt(&walk, &subreq, false);
        if (err)
                return err;

        kernel_fpu_begin();
        aesni_cts_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                          walk.nbytes, walk.iv);
        kernel_fpu_end();

        return skcipher_walk_done(&walk, 0);
}

#ifdef CONFIG_X86_64
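
/*
 * CTR mode: full blocks go through the asm routine; the final partial
 * block is produced by encrypting the counter once and XOR-ing the
 * keystream into the remaining bytes (ctr_crypt_final()).
 */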
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
                            struct skcipher_walk *walk)
{
        u8 *ctrblk = walk->iv;
        u8 keystream[AES_BLOCK_SIZE];
        u8 *src = walk->src.virt.addr;
        u8 *dst = walk->dst.virt.addr;
        unsigned int nbytes = walk->nbytes;

        aesni_enc(ctx, keystream, ctrblk);
        crypto_xor_cpy(dst, keystream, src, nbytes);

        crypto_inc(ctrblk, AES_BLOCK_SIZE);
}

static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv)
{
        /*
         * Based on the key length, use the by8 version of CTR mode
         * encryption/decryption for improved performance.
         * aes_set_key_common() ensures that the key length is one of
         * {128, 192, 256} bits.
         */
        if (ctx->key_length == AES_KEYSIZE_128)
                aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
        else if (ctx->key_length == AES_KEYSIZE_192)
                aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
        else
                aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
}

static int ctr_crypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, true);

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
                aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                                      nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = skcipher_walk_done(&walk, nbytes);
        }
        if (walk.nbytes) {
                ctr_crypt_final(ctx, &walk);
                err = skcipher_walk_done(&walk, 0);
        }
        kernel_fpu_end();

        return err;
}

static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
        struct crypto_aes_ctx ctx;
        int ret;

        ret = aes_expandkey(&ctx, key, key_len);
        if (ret)
                return ret;

        /*
         * Zero the hash subkey container: encrypting the all-zero block
         * with the cipher key yields the hash subkey.
         */
        memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

        aes_encrypt(&ctx, hash_subkey, hash_subkey);

        memzero_explicit(&ctx, sizeof(ctx));
        return 0;
}

static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
                                  unsigned int key_len)
{
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);

        if (key_len < 4)
                return -EINVAL;

        /* Account for the 4-byte nonce at the end. */
        key_len -= 4;

        memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));

        return aes_set_key_common(crypto_aead_tfm(aead),
                                  &ctx->aes_key_expanded, key, key_len) ?:
               rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}

/*
 * This is the Integrity Check Value (aka the authentication tag) length
 * and can be 8, 12 or 16 bytes long.
 */
static int common_rfc4106_set_authsize(struct crypto_aead *aead,
                                       unsigned int authsize)
{
        switch (authsize) {
        case 8:
        case 12:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
                                       unsigned int authsize)
{
        switch (authsize) {
        case 4:
        case 8:
        case 12:
        case 13:
        case 14:
        case 15:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
                              unsigned int assoclen, u8 *hash_subkey,
                              u8 *iv, void *aes_ctx)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        unsigned long auth_tag_len = crypto_aead_authsize(tfm);
        const struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm;
        u8 databuf[sizeof(struct gcm_context_data) + (AESNI_ALIGN - 8)] __aligned(8);
        struct gcm_context_data *data = PTR_ALIGN((void *)databuf, AESNI_ALIGN);
        struct scatter_walk dst_sg_walk = {};
        unsigned long left = req->cryptlen;
        unsigned long len, srclen, dstlen;
        struct scatter_walk assoc_sg_walk;
        struct scatter_walk src_sg_walk;
        struct scatterlist src_start[2];
        struct scatterlist dst_start[2];
        struct scatterlist *src_sg;
        struct scatterlist *dst_sg;
        u8 *src, *dst, *assoc;
        u8 *assocmem = NULL;
        u8 authTag[16];

        if (!enc)
                left -= auth_tag_len;

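        /*
         * The AVX implementations only pay off above their break-even
         * sizes (AVX_GEN2_OPTSIZE/AVX_GEN4_OPTSIZE), so demote short
         * requests to the next simpler variant.
         */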
        if (left < AVX_GEN4_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen4)
                gcm_tfm = &aesni_gcm_tfm_avx_gen2;
        if (left < AVX_GEN2_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen2)
                gcm_tfm = &aesni_gcm_tfm_sse;

        /* Linearize assoc, if not already linear */
        if (req->src->length >= assoclen && req->src->length) {
                scatterwalk_start(&assoc_sg_walk, req->src);
                assoc = scatterwalk_map(&assoc_sg_walk);
        } else {
                gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                              GFP_KERNEL : GFP_ATOMIC;

                /* assoc can be any length, so must be on heap */
                assocmem = kmalloc(assoclen, flags);
                if (unlikely(!assocmem))
                        return -ENOMEM;
                assoc = assocmem;

                scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
        }

        if (left) {
                src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
                scatterwalk_start(&src_sg_walk, src_sg);
                if (req->src != req->dst) {
                        dst_sg = scatterwalk_ffwd(dst_start, req->dst,
                                                  req->assoclen);
                        scatterwalk_start(&dst_sg_walk, dst_sg);
                }
        }

        kernel_fpu_begin();
        gcm_tfm->init(aes_ctx, data, iv, hash_subkey, assoc, assoclen);
        if (req->src != req->dst) {
                while (left) {
                        src = scatterwalk_map(&src_sg_walk);
                        dst = scatterwalk_map(&dst_sg_walk);
                        srclen = scatterwalk_clamp(&src_sg_walk, left);
                        dstlen = scatterwalk_clamp(&dst_sg_walk, left);
                        len = min(srclen, dstlen);
                        if (len) {
                                if (enc)
                                        gcm_tfm->enc_update(aes_ctx, data,
                                                             dst, src, len);
                                else
                                        gcm_tfm->dec_update(aes_ctx, data,
                                                             dst, src, len);
                        }
                        left -= len;

                        scatterwalk_unmap(src);
                        scatterwalk_unmap(dst);
                        scatterwalk_advance(&src_sg_walk, len);
                        scatterwalk_advance(&dst_sg_walk, len);
                        scatterwalk_done(&src_sg_walk, 0, left);
                        scatterwalk_done(&dst_sg_walk, 1, left);
                }
        } else {
                while (left) {
                        dst = src = scatterwalk_map(&src_sg_walk);
                        len = scatterwalk_clamp(&src_sg_walk, left);
                        if (len) {
                                if (enc)
                                        gcm_tfm->enc_update(aes_ctx, data,
                                                             src, src, len);
                                else
                                        gcm_tfm->dec_update(aes_ctx, data,
                                                             src, src, len);
                        }
                        left -= len;
                        scatterwalk_unmap(src);
                        scatterwalk_advance(&src_sg_walk, len);
                        scatterwalk_done(&src_sg_walk, 1, left);
                }
        }
        gcm_tfm->finalize(aes_ctx, data, authTag, auth_tag_len);
        kernel_fpu_end();

        if (!assocmem)
                scatterwalk_unmap(assoc);
        else
                kfree(assocmem);

        if (!enc) {
                u8 authTagMsg[16];

                /* Copy out original authTag */
                scatterwalk_map_and_copy(authTagMsg, req->src,
                                         req->assoclen + req->cryptlen -
                                         auth_tag_len,
                                         auth_tag_len, 0);

                /* Compare generated tag with passed in tag. */
                return crypto_memneq(authTagMsg, authTag, auth_tag_len) ?
                        -EBADMSG : 0;
        }

        /* Copy in the authTag */
        scatterwalk_map_and_copy(authTag, req->dst,
                                 req->assoclen + req->cryptlen,
                                 auth_tag_len, 1);

        return 0;
}

static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
                          u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
        return gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv,
                                aes_ctx);
}

static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
                          u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
        return gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv,
                                aes_ctx);
}

static int helper_rfc4106_encrypt(struct aead_request *req)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
        u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
        unsigned int i;
        __be32 counter = cpu_to_be32(1);

        /*
         * Assuming we are supporting rfc4106 64-bit extended sequence
         * numbers, the AAD length must be 16 or 20 bytes.
         */
        if (unlikely(req->assoclen != 16 && req->assoclen != 20))
                return -EINVAL;

        /* Build the IV: 4-byte nonce, 8-byte explicit IV, 32-bit counter. */
        for (i = 0; i < 4; i++)
                *(iv+i) = ctx->nonce[i];
        for (i = 0; i < 8; i++)
                *(iv+4+i) = req->iv[i];
        *((__be32 *)(iv+12)) = counter;

        return gcmaes_encrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
                              aes_ctx);
}

static int helper_rfc4106_decrypt(struct aead_request *req)
{
        __be32 counter = cpu_to_be32(1);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
        u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
        unsigned int i;

        /*
         * Assuming we are supporting rfc4106 64-bit extended sequence
         * numbers, the AAD length must be 16 or 20 bytes.
         */
        if (unlikely(req->assoclen != 16 && req->assoclen != 20))
                return -EINVAL;

        /* Build the IV: 4-byte nonce, 8-byte explicit IV, 32-bit counter. */
        for (i = 0; i < 4; i++)
                *(iv+i) = ctx->nonce[i];
        for (i = 0; i < 8; i++)
                *(iv+4+i) = req->iv[i];
        *((__be32 *)(iv+12)) = counter;

        return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
                              aes_ctx);
}
#endif

static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
        int err;

        err = xts_verify_key(tfm, key, keylen);
        if (err)
                return err;

        keylen /= 2;

        /* first half of xts-key is for crypt */
        err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
                                 key, keylen);
        if (err)
                return err;

        /* second half of xts-key is for tweak */
        return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
                                  key + keylen, keylen);
}

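/*
 * XTS: process as many full blocks as possible first; when the length is
 * not a multiple of AES_BLOCK_SIZE, the final partial block is handled
 * together with the last full block via ciphertext stealing in a second
 * pass.
 */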
static int xts_crypt(struct skcipher_request *req, bool encrypt)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
        int tail = req->cryptlen % AES_BLOCK_SIZE;
        struct skcipher_request subreq;
        struct skcipher_walk walk;
        int err;

        if (req->cryptlen < AES_BLOCK_SIZE)
                return -EINVAL;

        err = skcipher_walk_virt(&walk, req, false);

        if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
                int blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;

                skcipher_walk_abort(&walk);

                skcipher_request_set_tfm(&subreq, tfm);
                skcipher_request_set_callback(&subreq,
                                              skcipher_request_flags(req),
                                              NULL, NULL);
                skcipher_request_set_crypt(&subreq, req->src, req->dst,
                                           blocks * AES_BLOCK_SIZE, req->iv);
                req = &subreq;
                err = skcipher_walk_virt(&walk, req, false);
        } else {
                tail = 0;
        }

        kernel_fpu_begin();

        /* calculate first value of T */
        aesni_enc(aes_ctx(ctx->raw_tweak_ctx), walk.iv, walk.iv);

        while (walk.nbytes > 0) {
                int nbytes = walk.nbytes;

                if (nbytes < walk.total)
                        nbytes &= ~(AES_BLOCK_SIZE - 1);

                if (encrypt)
                        aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx),
                                          walk.dst.virt.addr, walk.src.virt.addr,
                                          nbytes, walk.iv);
                else
                        aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx),
                                          walk.dst.virt.addr, walk.src.virt.addr,
                                          nbytes, walk.iv);
                kernel_fpu_end();

                err = skcipher_walk_done(&walk, walk.nbytes - nbytes);

                if (walk.nbytes > 0)
                        kernel_fpu_begin();
        }

        if (unlikely(tail > 0 && !err)) {
                struct scatterlist sg_src[2], sg_dst[2];
                struct scatterlist *src, *dst;

                dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
                if (req->dst != req->src)
                        dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);

                skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
                                           req->iv);

                err = skcipher_walk_virt(&walk, &subreq, false);
                if (err)
                        return err;

                kernel_fpu_begin();
                if (encrypt)
                        aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx),
                                          walk.dst.virt.addr, walk.src.virt.addr,
                                          walk.nbytes, walk.iv);
                else
                        aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx),
                                          walk.dst.virt.addr, walk.src.virt.addr,
                                          walk.nbytes, walk.iv);
                kernel_fpu_end();

                err = skcipher_walk_done(&walk, 0);
        }
        return err;
}

static int xts_encrypt(struct skcipher_request *req)
{
        return xts_crypt(req, true);
}

static int xts_decrypt(struct skcipher_request *req)
{
        return xts_crypt(req, false);
}

static struct crypto_alg aesni_cipher_alg = {
        .cra_name               = "aes",
        .cra_driver_name        = "aes-aesni",
        .cra_priority           = 300,
        .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
        .cra_module             = THIS_MODULE,
        .cra_u  = {
                .cipher = {
                        .cia_min_keysize        = AES_MIN_KEY_SIZE,
                        .cia_max_keysize        = AES_MAX_KEY_SIZE,
                        .cia_setkey             = aes_set_key,
                        .cia_encrypt            = aesni_encrypt,
                        .cia_decrypt            = aesni_decrypt
                }
        }
};

static struct skcipher_alg aesni_skciphers[] = {
        {
                .base = {
                        .cra_name               = "__ecb(aes)",
                        .cra_driver_name        = "__ecb-aes-aesni",
                        .cra_priority           = 400,
                        .cra_flags              = CRYPTO_ALG_INTERNAL,
                        .cra_blocksize          = AES_BLOCK_SIZE,
                        .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
                        .cra_module             = THIS_MODULE,
                },
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .setkey         = aesni_skcipher_setkey,
                .encrypt        = ecb_encrypt,
                .decrypt        = ecb_decrypt,
        }, {
                .base = {
                        .cra_name               = "__cbc(aes)",
                        .cra_driver_name        = "__cbc-aes-aesni",
                        .cra_priority           = 400,
                        .cra_flags              = CRYPTO_ALG_INTERNAL,
                        .cra_blocksize          = AES_BLOCK_SIZE,
                        .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
                        .cra_module             = THIS_MODULE,
                },
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = aesni_skcipher_setkey,
                .encrypt        = cbc_encrypt,
                .decrypt        = cbc_decrypt,
        }, {
                .base = {
                        .cra_name               = "__cts(cbc(aes))",
                        .cra_driver_name        = "__cts-cbc-aes-aesni",
                        .cra_priority           = 400,
                        .cra_flags              = CRYPTO_ALG_INTERNAL,
                        .cra_blocksize          = AES_BLOCK_SIZE,
                        .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
                        .cra_module             = THIS_MODULE,
                },
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .walksize       = 2 * AES_BLOCK_SIZE,
                .setkey         = aesni_skcipher_setkey,
                .encrypt        = cts_cbc_encrypt,
                .decrypt        = cts_cbc_decrypt,
#ifdef CONFIG_X86_64
        }, {
                .base = {
                        .cra_name               = "__ctr(aes)",
                        .cra_driver_name        = "__ctr-aes-aesni",
                        .cra_priority           = 400,
                        .cra_flags              = CRYPTO_ALG_INTERNAL,
                        .cra_blocksize          = 1,
                        .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
                        .cra_module             = THIS_MODULE,
                },
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .chunksize      = AES_BLOCK_SIZE,
                .setkey         = aesni_skcipher_setkey,
                .encrypt        = ctr_crypt,
                .decrypt        = ctr_crypt,
#endif
        }, {
                .base = {
                        .cra_name               = "__xts(aes)",
                        .cra_driver_name        = "__xts-aes-aesni",
                        .cra_priority           = 401,
                        .cra_flags              = CRYPTO_ALG_INTERNAL,
                        .cra_blocksize          = AES_BLOCK_SIZE,
                        .cra_ctxsize            = XTS_AES_CTX_SIZE,
                        .cra_module             = THIS_MODULE,
                },
                .min_keysize    = 2 * AES_MIN_KEY_SIZE,
                .max_keysize    = 2 * AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .walksize       = 2 * AES_BLOCK_SIZE,
                .setkey         = xts_aesni_setkey,
                .encrypt        = xts_encrypt,
                .decrypt        = xts_decrypt,
        }
};

static
struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];

#ifdef CONFIG_X86_64
static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key,
                                  unsigned int key_len)
{
        struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(aead);

        return aes_set_key_common(crypto_aead_tfm(aead),
                                  &ctx->aes_key_expanded, key, key_len) ?:
               rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}

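/*
 * With the standard 96-bit GCM IV, the initial counter block is simply
 * IV || 0^31 || 1, so the helpers below build it directly instead of
 * going through the GHASH-based derivation needed for other IV sizes.
 */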
static int generic_gcmaes_encrypt(struct aead_request *req)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
        u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
        __be32 counter = cpu_to_be32(1);

        memcpy(iv, req->iv, 12);
        *((__be32 *)(iv+12)) = counter;

        return gcmaes_encrypt(req, req->assoclen, ctx->hash_subkey, iv,
                              aes_ctx);
}

static int generic_gcmaes_decrypt(struct aead_request *req)
{
        __be32 counter = cpu_to_be32(1);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
        u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);

        memcpy(iv, req->iv, 12);
        *((__be32 *)(iv+12)) = counter;

        return gcmaes_decrypt(req, req->assoclen, ctx->hash_subkey, iv,
                              aes_ctx);
}

static struct aead_alg aesni_aeads[] = { {
        .setkey                 = common_rfc4106_set_key,
        .setauthsize            = common_rfc4106_set_authsize,
        .encrypt                = helper_rfc4106_encrypt,
        .decrypt                = helper_rfc4106_decrypt,
        .ivsize                 = GCM_RFC4106_IV_SIZE,
        .maxauthsize            = 16,
        .base = {
                .cra_name               = "__rfc4106(gcm(aes))",
                .cra_driver_name        = "__rfc4106-gcm-aesni",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_INTERNAL,
                .cra_blocksize          = 1,
                .cra_ctxsize            = sizeof(struct aesni_rfc4106_gcm_ctx),
                .cra_alignmask          = AESNI_ALIGN - 1,
                .cra_module             = THIS_MODULE,
        },
}, {
        .setkey                 = generic_gcmaes_set_key,
        .setauthsize            = generic_gcmaes_set_authsize,
        .encrypt                = generic_gcmaes_encrypt,
        .decrypt                = generic_gcmaes_decrypt,
        .ivsize                 = GCM_AES_IV_SIZE,
        .maxauthsize            = 16,
        .base = {
                .cra_name               = "__gcm(aes)",
                .cra_driver_name        = "__generic-gcm-aesni",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_INTERNAL,
                .cra_blocksize          = 1,
                .cra_ctxsize            = sizeof(struct generic_gcmaes_ctx),
                .cra_alignmask          = AESNI_ALIGN - 1,
                .cra_module             = THIS_MODULE,
        },
} };
#else
static struct aead_alg aesni_aeads[0];
#endif

static struct simd_aead_alg *aesni_simd_aeads[ARRAY_SIZE(aesni_aeads)];

static const struct x86_cpu_id aesni_cpu_id[] = {
        X86_MATCH_FEATURE(X86_FEATURE_AES, NULL),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);

static int __init aesni_init(void)
{
        int err;

        if (!x86_match_cpu(aesni_cpu_id))
                return -ENODEV;
#ifdef CONFIG_X86_64
        if (boot_cpu_has(X86_FEATURE_AVX2)) {
                pr_info("AVX2 version of gcm_enc/dec engaged.\n");
                aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen4;
        } else if (boot_cpu_has(X86_FEATURE_AVX)) {
                pr_info("AVX version of gcm_enc/dec engaged.\n");
                aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen2;
        } else {
                pr_info("SSE version of gcm_enc/dec engaged.\n");
                aesni_gcm_tfm = &aesni_gcm_tfm_sse;
        }
        aesni_ctr_enc_tfm = aesni_ctr_enc;
        if (boot_cpu_has(X86_FEATURE_AVX)) {
                /* optimize performance of ctr mode encryption transform */
                aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
                pr_info("AES CTR mode by8 optimization enabled\n");
        }
#endif

        err = crypto_register_alg(&aesni_cipher_alg);
        if (err)
                return err;

        err = simd_register_skciphers_compat(aesni_skciphers,
                                             ARRAY_SIZE(aesni_skciphers),
                                             aesni_simd_skciphers);
        if (err)
                goto unregister_cipher;

        err = simd_register_aeads_compat(aesni_aeads, ARRAY_SIZE(aesni_aeads),
                                         aesni_simd_aeads);
        if (err)
                goto unregister_skciphers;

        return 0;

unregister_skciphers:
        simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
                                  aesni_simd_skciphers);
unregister_cipher:
        crypto_unregister_alg(&aesni_cipher_alg);
        return err;
}

static void __exit aesni_exit(void)
{
        simd_unregister_aeads(aesni_aeads, ARRAY_SIZE(aesni_aeads),
                              aesni_simd_aeads);
        simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
                                  aesni_simd_skciphers);
        crypto_unregister_alg(&aesni_cipher_alg);
}

late_initcall(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");