crypto: sha - split sha.h into sha1.h and sha2.h
[linux-2.6-microblaze.git] drivers/crypto/qat/qat_common/qat_algs.c
1 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
2 /* Copyright(c) 2014 - 2020 Intel Corporation */
3 #include <linux/module.h>
4 #include <linux/slab.h>
5 #include <linux/crypto.h>
6 #include <crypto/internal/aead.h>
7 #include <crypto/internal/skcipher.h>
8 #include <crypto/aes.h>
9 #include <crypto/sha1.h>
10 #include <crypto/sha2.h>
11 #include <crypto/hash.h>
12 #include <crypto/hmac.h>
13 #include <crypto/algapi.h>
14 #include <crypto/authenc.h>
15 #include <crypto/scatterwalk.h>
16 #include <crypto/xts.h>
17 #include <linux/dma-mapping.h>
18 #include "adf_accel_devices.h"
19 #include "adf_transport.h"
20 #include "adf_common_drv.h"
21 #include "qat_crypto.h"
22 #include "icp_qat_hw.h"
23 #include "icp_qat_fw.h"
24 #include "icp_qat_fw_la.h"
25
26 #define QAT_AES_HW_CONFIG_ENC(alg, mode) \
27         ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
28                                        ICP_QAT_HW_CIPHER_NO_CONVERT, \
29                                        ICP_QAT_HW_CIPHER_ENCRYPT)
30
31 #define QAT_AES_HW_CONFIG_DEC(alg, mode) \
32         ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
33                                        ICP_QAT_HW_CIPHER_KEY_CONVERT, \
34                                        ICP_QAT_HW_CIPHER_DECRYPT)
35
36 static DEFINE_MUTEX(algs_lock);
37 static unsigned int active_devs;
38
39 struct qat_alg_buf {
40         u32 len;
41         u32 resrvd;
42         u64 addr;
43 } __packed;
44
45 struct qat_alg_buf_list {
46         u64 resrvd;
47         u32 num_bufs;
48         u32 num_mapped_bufs;
49         struct qat_alg_buf bufers[];
50 } __packed __aligned(64);
51
52 /* Common content descriptor */
53 struct qat_alg_cd {
54         union {
55                 struct qat_enc { /* Encrypt content desc */
56                         struct icp_qat_hw_cipher_algo_blk cipher;
57                         struct icp_qat_hw_auth_algo_blk hash;
58                 } qat_enc_cd;
59                 struct qat_dec { /* Decrypt content desc */
60                         struct icp_qat_hw_auth_algo_blk hash;
61                         struct icp_qat_hw_cipher_algo_blk cipher;
62                 } qat_dec_cd;
63         };
64 } __aligned(64);
65
66 struct qat_alg_aead_ctx {
67         struct qat_alg_cd *enc_cd;
68         struct qat_alg_cd *dec_cd;
69         dma_addr_t enc_cd_paddr;
70         dma_addr_t dec_cd_paddr;
71         struct icp_qat_fw_la_bulk_req enc_fw_req;
72         struct icp_qat_fw_la_bulk_req dec_fw_req;
73         struct crypto_shash *hash_tfm;
74         enum icp_qat_hw_auth_algo qat_hash_alg;
75         struct qat_crypto_instance *inst;
76         union {
77                 struct sha1_state sha1;
78                 struct sha256_state sha256;
79                 struct sha512_state sha512;
80         };
81         char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
82         char opad[SHA512_BLOCK_SIZE];
83 };
84
85 struct qat_alg_skcipher_ctx {
86         struct icp_qat_hw_cipher_algo_blk *enc_cd;
87         struct icp_qat_hw_cipher_algo_blk *dec_cd;
88         dma_addr_t enc_cd_paddr;
89         dma_addr_t dec_cd_paddr;
90         struct icp_qat_fw_la_bulk_req enc_fw_req;
91         struct icp_qat_fw_la_bulk_req dec_fw_req;
92         struct qat_crypto_instance *inst;
93         struct crypto_skcipher *ftfm;
94         bool fallback;
95         int mode;
96 };
97
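/*
 * Map a QAT hash algorithm to the size of its hardware inner state
 * (state1), or -EFAULT for an unsupported algorithm.
 */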
98 static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
99 {
100         switch (qat_hash_alg) {
101         case ICP_QAT_HW_AUTH_ALGO_SHA1:
102                 return ICP_QAT_HW_SHA1_STATE1_SZ;
103         case ICP_QAT_HW_AUTH_ALGO_SHA256:
104                 return ICP_QAT_HW_SHA256_STATE1_SZ;
105         case ICP_QAT_HW_AUTH_ALGO_SHA512:
106                 return ICP_QAT_HW_SHA512_STATE1_SZ;
107         default:
108                 return -EFAULT;
109         }
110         return -EFAULT;
111 }
112
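/*
 * Precompute the HMAC inner and outer digests for the content descriptor:
 * hash (or pad) the key, XOR it with the ipad/opad constants, run one
 * block through the shash and export the partial state big-endian into
 * hash->sha.state1, with the outer state at an 8-byte aligned offset.
 */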
113 static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
114                                   struct qat_alg_aead_ctx *ctx,
115                                   const u8 *auth_key,
116                                   unsigned int auth_keylen)
117 {
118         SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
119         int block_size = crypto_shash_blocksize(ctx->hash_tfm);
120         int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
121         __be32 *hash_state_out;
122         __be64 *hash512_state_out;
123         int i, offset;
124
125         memset(ctx->ipad, 0, block_size);
126         memset(ctx->opad, 0, block_size);
127         shash->tfm = ctx->hash_tfm;
128
129         if (auth_keylen > block_size) {
130                 int ret = crypto_shash_digest(shash, auth_key,
131                                               auth_keylen, ctx->ipad);
132                 if (ret)
133                         return ret;
134
135                 memcpy(ctx->opad, ctx->ipad, digest_size);
136         } else {
137                 memcpy(ctx->ipad, auth_key, auth_keylen);
138                 memcpy(ctx->opad, auth_key, auth_keylen);
139         }
140
141         for (i = 0; i < block_size; i++) {
142                 char *ipad_ptr = ctx->ipad + i;
143                 char *opad_ptr = ctx->opad + i;
144                 *ipad_ptr ^= HMAC_IPAD_VALUE;
145                 *opad_ptr ^= HMAC_OPAD_VALUE;
146         }
147
148         if (crypto_shash_init(shash))
149                 return -EFAULT;
150
151         if (crypto_shash_update(shash, ctx->ipad, block_size))
152                 return -EFAULT;
153
154         hash_state_out = (__be32 *)hash->sha.state1;
155         hash512_state_out = (__be64 *)hash_state_out;
156
157         switch (ctx->qat_hash_alg) {
158         case ICP_QAT_HW_AUTH_ALGO_SHA1:
159                 if (crypto_shash_export(shash, &ctx->sha1))
160                         return -EFAULT;
161                 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
162                         *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
163                 break;
164         case ICP_QAT_HW_AUTH_ALGO_SHA256:
165                 if (crypto_shash_export(shash, &ctx->sha256))
166                         return -EFAULT;
167                 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
168                         *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
169                 break;
170         case ICP_QAT_HW_AUTH_ALGO_SHA512:
171                 if (crypto_shash_export(shash, &ctx->sha512))
172                         return -EFAULT;
173                 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
174                         *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
175                 break;
176         default:
177                 return -EFAULT;
178         }
179
180         if (crypto_shash_init(shash))
181                 return -EFAULT;
182
183         if (crypto_shash_update(shash, ctx->opad, block_size))
184                 return -EFAULT;
185
186         offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
187         if (offset < 0)
188                 return -EFAULT;
189
190         hash_state_out = (__be32 *)(hash->sha.state1 + offset);
191         hash512_state_out = (__be64 *)hash_state_out;
192
193         switch (ctx->qat_hash_alg) {
194         case ICP_QAT_HW_AUTH_ALGO_SHA1:
195                 if (crypto_shash_export(shash, &ctx->sha1))
196                         return -EFAULT;
197                 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
198                         *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
199                 break;
200         case ICP_QAT_HW_AUTH_ALGO_SHA256:
201                 if (crypto_shash_export(shash, &ctx->sha256))
202                         return -EFAULT;
203                 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
204                         *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
205                 break;
206         case ICP_QAT_HW_AUTH_ALGO_SHA512:
207                 if (crypto_shash_export(shash, &ctx->sha512))
208                         return -EFAULT;
209                 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
210                         *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
211                 break;
212         default:
213                 return -EFAULT;
214         }
215         memzero_explicit(ctx->ipad, block_size);
216         memzero_explicit(ctx->opad, block_size);
217         return 0;
218 }
219
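/* Fill the request header fields shared by every lookaside (LA) request. */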
220 static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
221 {
222         header->hdr_flags =
223                 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
224         header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
225         header->comn_req_flags =
226                 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
227                                             QAT_COMN_PTR_TYPE_SGL);
228         ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
229                                   ICP_QAT_FW_LA_PARTIAL_NONE);
230         ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
231                                            ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
232         ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
233                                 ICP_QAT_FW_LA_NO_PROTO);
234         ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
235                                        ICP_QAT_FW_LA_NO_UPDATE_STATE);
236 }
237
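/*
 * Build the encrypt content descriptor (cipher block followed by hash
 * block) and the CIPHER_HASH firmware request template for this tfm.
 */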
238 static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
239                                          int alg,
240                                          struct crypto_authenc_keys *keys,
241                                          int mode)
242 {
243         struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
244         unsigned int digestsize = crypto_aead_authsize(aead_tfm);
245         struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
246         struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
247         struct icp_qat_hw_auth_algo_blk *hash =
248                 (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
249                 sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
250         struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
251         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
252         struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
253         void *ptr = &req_tmpl->cd_ctrl;
254         struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
255         struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
256
257         /* CD setup */
258         cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
259         memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
260         hash->sha.inner_setup.auth_config.config =
261                 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
262                                              ctx->qat_hash_alg, digestsize);
263         hash->sha.inner_setup.auth_counter.counter =
264                 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
265
266         if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
267                 return -EFAULT;
268
269         /* Request setup */
270         qat_alg_init_common_hdr(header);
271         header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
272         ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
273                                            ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
274         ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
275                                    ICP_QAT_FW_LA_RET_AUTH_RES);
276         ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
277                                    ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
278         cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
279         cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
280
281         /* Cipher CD config setup */
282         cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
283         cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
284         cipher_cd_ctrl->cipher_cfg_offset = 0;
285         ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
286         ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
287         /* Auth CD config setup */
288         hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
289         hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
290         hash_cd_ctrl->inner_res_sz = digestsize;
291         hash_cd_ctrl->final_sz = digestsize;
292
293         switch (ctx->qat_hash_alg) {
294         case ICP_QAT_HW_AUTH_ALGO_SHA1:
295                 hash_cd_ctrl->inner_state1_sz =
296                         round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
297                 hash_cd_ctrl->inner_state2_sz =
298                         round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
299                 break;
300         case ICP_QAT_HW_AUTH_ALGO_SHA256:
301                 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
302                 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
303                 break;
304         case ICP_QAT_HW_AUTH_ALGO_SHA512:
305                 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
306                 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
307                 break;
308         default:
309                 break;
310         }
311         hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
312                         ((sizeof(struct icp_qat_hw_auth_setup) +
313                          round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
314         ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
315         ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
316         return 0;
317 }
318
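/*
 * Build the decrypt content descriptor (hash block followed by cipher
 * block) and the HASH_CIPHER firmware request template; the digest is
 * verified by the hardware rather than returned.
 */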
319 static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
320                                          int alg,
321                                          struct crypto_authenc_keys *keys,
322                                          int mode)
323 {
324         struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
325         unsigned int digestsize = crypto_aead_authsize(aead_tfm);
326         struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
327         struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
328         struct icp_qat_hw_cipher_algo_blk *cipher =
329                 (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
330                 sizeof(struct icp_qat_hw_auth_setup) +
331                 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
332         struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
333         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
334         struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
335         void *ptr = &req_tmpl->cd_ctrl;
336         struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
337         struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
338         struct icp_qat_fw_la_auth_req_params *auth_param =
339                 (struct icp_qat_fw_la_auth_req_params *)
340                 ((char *)&req_tmpl->serv_specif_rqpars +
341                 sizeof(struct icp_qat_fw_la_cipher_req_params));
342
343         /* CD setup */
344         cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
345         memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
346         hash->sha.inner_setup.auth_config.config =
347                 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
348                                              ctx->qat_hash_alg,
349                                              digestsize);
350         hash->sha.inner_setup.auth_counter.counter =
351                 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
352
353         if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
354                 return -EFAULT;
355
356         /* Request setup */
357         qat_alg_init_common_hdr(header);
358         header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
359         ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
360                                            ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
361         ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
362                                    ICP_QAT_FW_LA_NO_RET_AUTH_RES);
363         ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
364                                    ICP_QAT_FW_LA_CMP_AUTH_RES);
365         cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
366         cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
367
368         /* Cipher CD config setup */
369         cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
370         cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
371         cipher_cd_ctrl->cipher_cfg_offset =
372                 (sizeof(struct icp_qat_hw_auth_setup) +
373                  roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
374         ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
375         ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
376
377         /* Auth CD config setup */
378         hash_cd_ctrl->hash_cfg_offset = 0;
379         hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
380         hash_cd_ctrl->inner_res_sz = digestsize;
381         hash_cd_ctrl->final_sz = digestsize;
382
383         switch (ctx->qat_hash_alg) {
384         case ICP_QAT_HW_AUTH_ALGO_SHA1:
385                 hash_cd_ctrl->inner_state1_sz =
386                         round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
387                 hash_cd_ctrl->inner_state2_sz =
388                         round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
389                 break;
390         case ICP_QAT_HW_AUTH_ALGO_SHA256:
391                 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
392                 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
393                 break;
394         case ICP_QAT_HW_AUTH_ALGO_SHA512:
395                 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
396                 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
397                 break;
398         default:
399                 break;
400         }
401
402         hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
403                         ((sizeof(struct icp_qat_hw_auth_setup) +
404                          round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
405         auth_param->auth_res_sz = digestsize;
406         ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
407         ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
408         return 0;
409 }
410
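/* Set up the parts of a skcipher request template common to both directions. */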
411 static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
412                                       struct icp_qat_fw_la_bulk_req *req,
413                                       struct icp_qat_hw_cipher_algo_blk *cd,
414                                       const u8 *key, unsigned int keylen)
415 {
416         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
417         struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
418         struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
419
420         memcpy(cd->aes.key, key, keylen);
421         qat_alg_init_common_hdr(header);
422         header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
423         cd_pars->u.s.content_desc_params_sz =
424                                 sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
425         /* Cipher CD config setup */
426         cd_ctrl->cipher_key_sz = keylen >> 3;
427         cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
428         cd_ctrl->cipher_cfg_offset = 0;
429         ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
430         ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
431 }
432
433 static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
434                                       int alg, const u8 *key,
435                                       unsigned int keylen, int mode)
436 {
437         struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
438         struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
439         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
440
441         qat_alg_skcipher_init_com(ctx, req, enc_cd, key, keylen);
442         cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
443         enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
444 }
445
446 static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
447                                       int alg, const u8 *key,
448                                       unsigned int keylen, int mode)
449 {
450         struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
451         struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
452         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
453
454         qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen);
455         cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
456
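        /* CTR mode only ever runs the AES engine in the encrypt direction */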
457         if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
458                 dec_cd->aes.cipher_config.val =
459                                         QAT_AES_HW_CONFIG_DEC(alg, mode);
460         else
461                 dec_cd->aes.cipher_config.val =
462                                         QAT_AES_HW_CONFIG_ENC(alg, mode);
463 }
464
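/*
 * Translate the key length into a QAT AES algorithm id; XTS keys are
 * twice the AES key size because they carry two keys.
 */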
465 static int qat_alg_validate_key(int key_len, int *alg, int mode)
466 {
467         if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
468                 switch (key_len) {
469                 case AES_KEYSIZE_128:
470                         *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
471                         break;
472                 case AES_KEYSIZE_192:
473                         *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
474                         break;
475                 case AES_KEYSIZE_256:
476                         *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
477                         break;
478                 default:
479                         return -EINVAL;
480                 }
481         } else {
482                 switch (key_len) {
483                 case AES_KEYSIZE_128 << 1:
484                         *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
485                         break;
486                 case AES_KEYSIZE_256 << 1:
487                         *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
488                         break;
489                 default:
490                         return -EINVAL;
491                 }
492         }
493         return 0;
494 }
495
496 static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
497                                       unsigned int keylen,  int mode)
498 {
499         struct crypto_authenc_keys keys;
500         int alg;
501
502         if (crypto_authenc_extractkeys(&keys, key, keylen))
503                 goto bad_key;
504
505         if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
506                 goto bad_key;
507
508         if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
509                 goto error;
510
511         if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
512                 goto error;
513
514         memzero_explicit(&keys, sizeof(keys));
515         return 0;
516 bad_key:
517         memzero_explicit(&keys, sizeof(keys));
518         return -EINVAL;
519 error:
520         memzero_explicit(&keys, sizeof(keys));
521         return -EFAULT;
522 }
523
524 static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
525                                           const u8 *key,
526                                           unsigned int keylen,
527                                           int mode)
528 {
529         int alg;
530
531         if (qat_alg_validate_key(keylen, &alg, mode))
532                 return -EINVAL;
533
534         qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
535         qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
536         return 0;
537 }
538
539 static int qat_alg_aead_rekey(struct crypto_aead *tfm, const u8 *key,
540                               unsigned int keylen)
541 {
542         struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
543
544         memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
545         memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
546         memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
547         memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
548
549         return qat_alg_aead_init_sessions(tfm, key, keylen,
550                                           ICP_QAT_HW_CIPHER_CBC_MODE);
551 }
552
553 static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key,
554                                unsigned int keylen)
555 {
556         struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
557         struct qat_crypto_instance *inst = NULL;
558         int node = get_current_node();
559         struct device *dev;
560         int ret;
561
562         inst = qat_crypto_get_instance_node(node);
563         if (!inst)
564                 return -EINVAL;
565         dev = &GET_DEV(inst->accel_dev);
566         ctx->inst = inst;
567         ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
568                                          &ctx->enc_cd_paddr,
569                                          GFP_ATOMIC);
570         if (!ctx->enc_cd) {
571                 ret = -ENOMEM;
572                 goto out_free_inst;
573         }
574         ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
575                                          &ctx->dec_cd_paddr,
576                                          GFP_ATOMIC);
577         if (!ctx->dec_cd) {
578                 ret = -ENOMEM;
579                 goto out_free_enc;
580         }
581
582         ret = qat_alg_aead_init_sessions(tfm, key, keylen,
583                                          ICP_QAT_HW_CIPHER_CBC_MODE);
584         if (ret)
585                 goto out_free_all;
586
587         return 0;
588
589 out_free_all:
590         memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
591         dma_free_coherent(dev, sizeof(struct qat_alg_cd),
592                           ctx->dec_cd, ctx->dec_cd_paddr);
593         ctx->dec_cd = NULL;
594 out_free_enc:
595         memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
596         dma_free_coherent(dev, sizeof(struct qat_alg_cd),
597                           ctx->enc_cd, ctx->enc_cd_paddr);
598         ctx->enc_cd = NULL;
599 out_free_inst:
600         ctx->inst = NULL;
601         qat_crypto_put_instance(inst);
602         return ret;
603 }
604
605 static int qat_alg_aead_setkey(struct crypto_aead *tfm, const u8 *key,
606                                unsigned int keylen)
607 {
608         struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
609
610         if (ctx->enc_cd)
611                 return qat_alg_aead_rekey(tfm, key, keylen);
612         else
613                 return qat_alg_aead_newkey(tfm, key, keylen);
614 }
615
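/* Unmap and free the scatter-gather buffer lists built for a request. */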
616 static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
617                               struct qat_crypto_request *qat_req)
618 {
619         struct device *dev = &GET_DEV(inst->accel_dev);
620         struct qat_alg_buf_list *bl = qat_req->buf.bl;
621         struct qat_alg_buf_list *blout = qat_req->buf.blout;
622         dma_addr_t blp = qat_req->buf.blp;
623         dma_addr_t blpout = qat_req->buf.bloutp;
624         size_t sz = qat_req->buf.sz;
625         size_t sz_out = qat_req->buf.sz_out;
626         int i;
627
628         for (i = 0; i < bl->num_bufs; i++)
629                 dma_unmap_single(dev, bl->bufers[i].addr,
630                                  bl->bufers[i].len, DMA_BIDIRECTIONAL);
631
632         dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
633         kfree(bl);
634         if (blp != blpout) {
635                 /* Out-of-place operation: also unmap the separately mapped output buffers */
636                 int bufless = blout->num_bufs - blout->num_mapped_bufs;
637
638                 for (i = bufless; i < blout->num_bufs; i++) {
639                         dma_unmap_single(dev, blout->bufers[i].addr,
640                                          blout->bufers[i].len,
641                                          DMA_BIDIRECTIONAL);
642                 }
643                 dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
644                 kfree(blout);
645         }
646 }
647
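/*
 * Flatten the source and destination scatterlists into the buffer-list
 * format the firmware consumes, DMA-mapping every non-empty segment.
 * For in-place operations the destination simply aliases the source list.
 */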
648 static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
649                                struct scatterlist *sgl,
650                                struct scatterlist *sglout,
651                                struct qat_crypto_request *qat_req)
652 {
653         struct device *dev = &GET_DEV(inst->accel_dev);
654         int i, sg_nctr = 0;
655         int n = sg_nents(sgl);
656         struct qat_alg_buf_list *bufl;
657         struct qat_alg_buf_list *buflout = NULL;
658         dma_addr_t blp;
659         dma_addr_t bloutp = 0;
660         struct scatterlist *sg;
661         size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
662
663         if (unlikely(!n))
664                 return -EINVAL;
665
666         bufl = kzalloc_node(sz, GFP_ATOMIC,
667                             dev_to_node(&GET_DEV(inst->accel_dev)));
668         if (unlikely(!bufl))
669                 return -ENOMEM;
670
671         blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
672         if (unlikely(dma_mapping_error(dev, blp)))
673                 goto err_in;
674
675         for_each_sg(sgl, sg, n, i) {
676                 int y = sg_nctr;
677
678                 if (!sg->length)
679                         continue;
680
681                 bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
682                                                       sg->length,
683                                                       DMA_BIDIRECTIONAL);
684                 bufl->bufers[y].len = sg->length;
685                 if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
686                         goto err_in;
687                 sg_nctr++;
688         }
689         bufl->num_bufs = sg_nctr;
690         qat_req->buf.bl = bufl;
691         qat_req->buf.blp = blp;
692         qat_req->buf.sz = sz;
693         /* Handle out of place operation */
694         if (sgl != sglout) {
695                 struct qat_alg_buf *bufers;
696
697                 n = sg_nents(sglout);
698                 sz_out = struct_size(buflout, bufers, n + 1);
699                 sg_nctr = 0;
700                 buflout = kzalloc_node(sz_out, GFP_ATOMIC,
701                                        dev_to_node(&GET_DEV(inst->accel_dev)));
702                 if (unlikely(!buflout))
703                         goto err_in;
704                 bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
705                 if (unlikely(dma_mapping_error(dev, bloutp)))
706                         goto err_out;
707                 bufers = buflout->bufers;
708                 for_each_sg(sglout, sg, n, i) {
709                         int y = sg_nctr;
710
711                         if (!sg->length)
712                                 continue;
713
714                         bufers[y].addr = dma_map_single(dev, sg_virt(sg),
715                                                         sg->length,
716                                                         DMA_BIDIRECTIONAL);
717                         if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
718                                 goto err_out;
719                         bufers[y].len = sg->length;
720                         sg_nctr++;
721                 }
722                 buflout->num_bufs = sg_nctr;
723                 buflout->num_mapped_bufs = sg_nctr;
724                 qat_req->buf.blout = buflout;
725                 qat_req->buf.bloutp = bloutp;
726                 qat_req->buf.sz_out = sz_out;
727         } else {
728                 /* Otherwise set the src and dst to the same address */
729                 qat_req->buf.bloutp = qat_req->buf.blp;
730                 qat_req->buf.sz_out = 0;
731         }
732         return 0;
733
734 err_out:
735         n = sg_nents(sglout);
736         for (i = 0; i < n; i++)
737                 if (!dma_mapping_error(dev, buflout->bufers[i].addr))
738                         dma_unmap_single(dev, buflout->bufers[i].addr,
739                                          buflout->bufers[i].len,
740                                          DMA_BIDIRECTIONAL);
741         if (!dma_mapping_error(dev, bloutp))
742                 dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
743         kfree(buflout);
744
745 err_in:
746         n = sg_nents(sgl);
747         for (i = 0; i < n; i++)
748                 if (!dma_mapping_error(dev, bufl->bufers[i].addr))
749                         dma_unmap_single(dev, bufl->bufers[i].addr,
750                                          bufl->bufers[i].len,
751                                          DMA_BIDIRECTIONAL);
752
753         if (!dma_mapping_error(dev, blp))
754                 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
755         kfree(bufl);
756
757         dev_err(dev, "Failed to map buf for dma\n");
758         return -ENOMEM;
759 }
760
761 static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
762                                   struct qat_crypto_request *qat_req)
763 {
764         struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
765         struct qat_crypto_instance *inst = ctx->inst;
766         struct aead_request *areq = qat_req->aead_req;
767         u8 stat_field = qat_resp->comn_resp.comn_status;
768         int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
769
770         qat_alg_free_bufl(inst, qat_req);
771         if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
772                 res = -EBADMSG;
773         areq->base.complete(&areq->base, res);
774 }
775
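/*
 * Advance the CTR-mode IV by the number of AES blocks processed,
 * treating it as a 128-bit big-endian counter.
 */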
776 static void qat_alg_update_iv_ctr_mode(struct qat_crypto_request *qat_req)
777 {
778         struct skcipher_request *sreq = qat_req->skcipher_req;
779         u64 iv_lo_prev;
780         u64 iv_lo;
781         u64 iv_hi;
782
783         memcpy(qat_req->iv, sreq->iv, AES_BLOCK_SIZE);
784
785         iv_lo = be64_to_cpu(qat_req->iv_lo);
786         iv_hi = be64_to_cpu(qat_req->iv_hi);
787
788         iv_lo_prev = iv_lo;
789         iv_lo += DIV_ROUND_UP(sreq->cryptlen, AES_BLOCK_SIZE);
790         if (iv_lo < iv_lo_prev)
791                 iv_hi++;
792
793         qat_req->iv_lo = cpu_to_be64(iv_lo);
794         qat_req->iv_hi = cpu_to_be64(iv_hi);
795 }
796
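/* For CBC the next IV is the last ciphertext block of this request. */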
797 static void qat_alg_update_iv_cbc_mode(struct qat_crypto_request *qat_req)
798 {
799         struct skcipher_request *sreq = qat_req->skcipher_req;
800         int offset = sreq->cryptlen - AES_BLOCK_SIZE;
801         struct scatterlist *sgl;
802
803         if (qat_req->encryption)
804                 sgl = sreq->dst;
805         else
806                 sgl = sreq->src;
807
808         scatterwalk_map_and_copy(qat_req->iv, sgl, offset, AES_BLOCK_SIZE, 0);
809 }
810
811 static void qat_alg_update_iv(struct qat_crypto_request *qat_req)
812 {
813         struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
814         struct device *dev = &GET_DEV(ctx->inst->accel_dev);
815
816         switch (ctx->mode) {
817         case ICP_QAT_HW_CIPHER_CTR_MODE:
818                 qat_alg_update_iv_ctr_mode(qat_req);
819                 break;
820         case ICP_QAT_HW_CIPHER_CBC_MODE:
821                 qat_alg_update_iv_cbc_mode(qat_req);
822                 break;
823         case ICP_QAT_HW_CIPHER_XTS_MODE:
824                 break;
825         default:
826                 dev_warn(dev, "Unsupported IV update for cipher mode %d\n",
827                          ctx->mode);
828         }
829 }
830
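/*
 * Completion callback for skcipher requests: release the buffer lists,
 * propagate the output IV back to the request and complete it.
 */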
831 static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
832                                       struct qat_crypto_request *qat_req)
833 {
834         struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
835         struct qat_crypto_instance *inst = ctx->inst;
836         struct skcipher_request *sreq = qat_req->skcipher_req;
837         u8 stat_field = qat_resp->comn_resp.comn_status;
838         int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
839
840         qat_alg_free_bufl(inst, qat_req);
841         if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
842                 res = -EINVAL;
843
844         if (qat_req->encryption)
845                 qat_alg_update_iv(qat_req);
846
847         memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);
848
849         sreq->base.complete(&sreq->base, res);
850 }
851
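/* Ring callback: recover the request from the opaque data and dispatch it. */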
852 void qat_alg_callback(void *resp)
853 {
854         struct icp_qat_fw_la_resp *qat_resp = resp;
855         struct qat_crypto_request *qat_req =
856                                 (void *)(__force long)qat_resp->opaque_data;
857
858         qat_req->cb(qat_resp, qat_req);
859 }
860
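/*
 * AEAD decrypt: map the buffers, fill in the cipher and auth request
 * parameters from the precomputed template and post the request to the
 * instance's symmetric TX ring, retrying a few times if the ring is busy.
 */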
861 static int qat_alg_aead_dec(struct aead_request *areq)
862 {
863         struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
864         struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
865         struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
866         struct qat_crypto_request *qat_req = aead_request_ctx(areq);
867         struct icp_qat_fw_la_cipher_req_params *cipher_param;
868         struct icp_qat_fw_la_auth_req_params *auth_param;
869         struct icp_qat_fw_la_bulk_req *msg;
870         int digst_size = crypto_aead_authsize(aead_tfm);
871         int ret, ctr = 0;
872         u32 cipher_len;
873
874         cipher_len = areq->cryptlen - digst_size;
875         if (cipher_len % AES_BLOCK_SIZE != 0)
876                 return -EINVAL;
877
878         ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
879         if (unlikely(ret))
880                 return ret;
881
882         msg = &qat_req->req;
883         *msg = ctx->dec_fw_req;
884         qat_req->aead_ctx = ctx;
885         qat_req->aead_req = areq;
886         qat_req->cb = qat_aead_alg_callback;
887         qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
888         qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
889         qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
890         cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
891         cipher_param->cipher_length = cipher_len;
892         cipher_param->cipher_offset = areq->assoclen;
893         memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
894         auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
895         auth_param->auth_off = 0;
896         auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
897         do {
898                 ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
899         } while (ret == -EAGAIN && ctr++ < 10);
900
901         if (ret == -EAGAIN) {
902                 qat_alg_free_bufl(ctx->inst, qat_req);
903                 return -EBUSY;
904         }
905         return -EINPROGRESS;
906 }
907
908 static int qat_alg_aead_enc(struct aead_request *areq)
909 {
910         struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
911         struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
912         struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
913         struct qat_crypto_request *qat_req = aead_request_ctx(areq);
914         struct icp_qat_fw_la_cipher_req_params *cipher_param;
915         struct icp_qat_fw_la_auth_req_params *auth_param;
916         struct icp_qat_fw_la_bulk_req *msg;
917         u8 *iv = areq->iv;
918         int ret, ctr = 0;
919
920         if (areq->cryptlen % AES_BLOCK_SIZE != 0)
921                 return -EINVAL;
922
923         ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
924         if (unlikely(ret))
925                 return ret;
926
927         msg = &qat_req->req;
928         *msg = ctx->enc_fw_req;
929         qat_req->aead_ctx = ctx;
930         qat_req->aead_req = areq;
931         qat_req->cb = qat_aead_alg_callback;
932         qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
933         qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
934         qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
935         cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
936         auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
937
938         memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
939         cipher_param->cipher_length = areq->cryptlen;
940         cipher_param->cipher_offset = areq->assoclen;
941
942         auth_param->auth_off = 0;
943         auth_param->auth_len = areq->assoclen + areq->cryptlen;
944
945         do {
946                 ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
947         } while (ret == -EAGAIN && ctr++ < 10);
948
949         if (ret == -EAGAIN) {
950                 qat_alg_free_bufl(ctx->inst, qat_req);
951                 return -EBUSY;
952         }
953         return -EINPROGRESS;
954 }
955
956 static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
957                                   const u8 *key, unsigned int keylen,
958                                   int mode)
959 {
960         memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
961         memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
962         memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
963         memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
964
965         return qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
966 }
967
968 static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx,
969                                    const u8 *key, unsigned int keylen,
970                                    int mode)
971 {
972         struct qat_crypto_instance *inst = NULL;
973         struct device *dev;
974         int node = get_current_node();
975         int ret;
976
977         inst = qat_crypto_get_instance_node(node);
978         if (!inst)
979                 return -EINVAL;
980         dev = &GET_DEV(inst->accel_dev);
981         ctx->inst = inst;
982         ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
983                                          &ctx->enc_cd_paddr,
984                                          GFP_ATOMIC);
985         if (!ctx->enc_cd) {
986                 ret = -ENOMEM;
987                 goto out_free_instance;
988         }
989         ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
990                                          &ctx->dec_cd_paddr,
991                                          GFP_ATOMIC);
992         if (!ctx->dec_cd) {
993                 ret = -ENOMEM;
994                 goto out_free_enc;
995         }
996
997         ret = qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
998         if (ret)
999                 goto out_free_all;
1000
1001         return 0;
1002
1003 out_free_all:
1004         memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
1005         dma_free_coherent(dev, sizeof(*ctx->dec_cd),
1006                           ctx->dec_cd, ctx->dec_cd_paddr);
1007         ctx->dec_cd = NULL;
1008 out_free_enc:
1009         memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
1010         dma_free_coherent(dev, sizeof(*ctx->enc_cd),
1011                           ctx->enc_cd, ctx->enc_cd_paddr);
1012         ctx->enc_cd = NULL;
1013 out_free_instance:
1014         ctx->inst = NULL;
1015         qat_crypto_put_instance(inst);
1016         return ret;
1017 }
1018
1019 static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm,
1020                                    const u8 *key, unsigned int keylen,
1021                                    int mode)
1022 {
1023         struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1024
1025         ctx->mode = mode;
1026
1027         if (ctx->enc_cd)
1028                 return qat_alg_skcipher_rekey(ctx, key, keylen, mode);
1029         else
1030                 return qat_alg_skcipher_newkey(ctx, key, keylen, mode);
1031 }
1032
1033 static int qat_alg_skcipher_cbc_setkey(struct crypto_skcipher *tfm,
1034                                        const u8 *key, unsigned int keylen)
1035 {
1036         return qat_alg_skcipher_setkey(tfm, key, keylen,
1037                                        ICP_QAT_HW_CIPHER_CBC_MODE);
1038 }
1039
1040 static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
1041                                        const u8 *key, unsigned int keylen)
1042 {
1043         return qat_alg_skcipher_setkey(tfm, key, keylen,
1044                                        ICP_QAT_HW_CIPHER_CTR_MODE);
1045 }
1046
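/*
 * XTS setkey: AES-192 XTS is not handled by the hardware, so such keys
 * are routed to the software fallback tfm instead.
 */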
1047 static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
1048                                        const u8 *key, unsigned int keylen)
1049 {
1050         struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1051         int ret;
1052
1053         ret = xts_verify_key(tfm, key, keylen);
1054         if (ret)
1055                 return ret;
1056
1057         if (keylen >> 1 == AES_KEYSIZE_192) {
1058                 ret = crypto_skcipher_setkey(ctx->ftfm, key, keylen);
1059                 if (ret)
1060                         return ret;
1061
1062                 ctx->fallback = true;
1063
1064                 return 0;
1065         }
1066
1067         ctx->fallback = false;
1068
1069         return qat_alg_skcipher_setkey(tfm, key, keylen,
1070                                        ICP_QAT_HW_CIPHER_XTS_MODE);
1071 }
1072
1073 static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
1074 {
1075         struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1076         struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
1077         struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1078         struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
1079         struct icp_qat_fw_la_cipher_req_params *cipher_param;
1080         struct icp_qat_fw_la_bulk_req *msg;
1081         int ret, ctr = 0;
1082
1083         if (req->cryptlen == 0)
1084                 return 0;
1085
1086         ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
1087         if (unlikely(ret))
1088                 return ret;
1089
1090         msg = &qat_req->req;
1091         *msg = ctx->enc_fw_req;
1092         qat_req->skcipher_ctx = ctx;
1093         qat_req->skcipher_req = req;
1094         qat_req->cb = qat_skcipher_alg_callback;
1095         qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
1096         qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1097         qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1098         qat_req->encryption = true;
1099         cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1100         cipher_param->cipher_length = req->cryptlen;
1101         cipher_param->cipher_offset = 0;
1102         memcpy(cipher_param->u.cipher_IV_array, req->iv, AES_BLOCK_SIZE);
1103
1104         do {
1105                 ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
1106         } while (ret == -EAGAIN && ctr++ < 10);
1107
1108         if (ret == -EAGAIN) {
1109                 qat_alg_free_bufl(ctx->inst, qat_req);
1110                 return -EBUSY;
1111         }
1112         return -EINPROGRESS;
1113 }
1114
1115 static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
1116 {
1117         if (req->cryptlen % AES_BLOCK_SIZE != 0)
1118                 return -EINVAL;
1119
1120         return qat_alg_skcipher_encrypt(req);
1121 }
1122
1123 static int qat_alg_skcipher_xts_encrypt(struct skcipher_request *req)
1124 {
1125         struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1126         struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
1127         struct skcipher_request *nreq = skcipher_request_ctx(req);
1128
1129         if (req->cryptlen < XTS_BLOCK_SIZE)
1130                 return -EINVAL;
1131
1132         if (ctx->fallback) {
1133                 memcpy(nreq, req, sizeof(*req));
1134                 skcipher_request_set_tfm(nreq, ctx->ftfm);
1135                 return crypto_skcipher_encrypt(nreq);
1136         }
1137
1138         return qat_alg_skcipher_encrypt(req);
1139 }
1140
1141 static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
1142 {
1143         struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1144         struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
1145         struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1146         struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
1147         struct icp_qat_fw_la_cipher_req_params *cipher_param;
1148         struct icp_qat_fw_la_bulk_req *msg;
1149         int ret, ctr = 0;
1150
1151         if (req->cryptlen == 0)
1152                 return 0;
1153
1154         ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
1155         if (unlikely(ret))
1156                 return ret;
1157
1158         msg = &qat_req->req;
1159         *msg = ctx->dec_fw_req;
1160         qat_req->skcipher_ctx = ctx;
1161         qat_req->skcipher_req = req;
1162         qat_req->cb = qat_skcipher_alg_callback;
1163         qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
1164         qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1165         qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1166         qat_req->encryption = false;
1167         cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1168         cipher_param->cipher_length = req->cryptlen;
1169         cipher_param->cipher_offset = 0;
1170         memcpy(cipher_param->u.cipher_IV_array, req->iv, AES_BLOCK_SIZE);
1171
1172         qat_alg_update_iv(qat_req);
1173
1174         do {
1175                 ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
1176         } while (ret == -EAGAIN && ctr++ < 10);
1177
1178         if (ret == -EAGAIN) {
1179                 qat_alg_free_bufl(ctx->inst, qat_req);
1180                 return -EBUSY;
1181         }
1182         return -EINPROGRESS;
1183 }
1184
1185 static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
1186 {
1187         if (req->cryptlen % AES_BLOCK_SIZE != 0)
1188                 return -EINVAL;
1189
1190         return qat_alg_skcipher_decrypt(req);
1191 }
1192
1193 static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req)
1194 {
1195         struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1196         struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
1197         struct skcipher_request *nreq = skcipher_request_ctx(req);
1198
1199         if (req->cryptlen < XTS_BLOCK_SIZE)
1200                 return -EINVAL;
1201
1202         if (ctx->fallback) {
1203                 memcpy(nreq, req, sizeof(*req));
1204                 skcipher_request_set_tfm(nreq, ctx->ftfm);
1205                 return crypto_skcipher_decrypt(nreq);
1206         }
1207
1208         return qat_alg_skcipher_decrypt(req);
1209 }
1210
1211 static int qat_alg_aead_init(struct crypto_aead *tfm,
1212                              enum icp_qat_hw_auth_algo hash,
1213                              const char *hash_name)
1214 {
1215         struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
1216
1217         ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
1218         if (IS_ERR(ctx->hash_tfm))
1219                 return PTR_ERR(ctx->hash_tfm);
1220         ctx->qat_hash_alg = hash;
1221         crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
1222         return 0;
1223 }
1224
1225 static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
1226 {
1227         return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
1228 }
1229
1230 static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
1231 {
1232         return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
1233 }
1234
1235 static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
1236 {
1237         return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
1238 }
1239
1240 static void qat_alg_aead_exit(struct crypto_aead *tfm)
1241 {
1242         struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
1243         struct qat_crypto_instance *inst = ctx->inst;
1244         struct device *dev;
1245
1246         crypto_free_shash(ctx->hash_tfm);
1247
1248         if (!inst)
1249                 return;
1250
1251         dev = &GET_DEV(inst->accel_dev);
1252         if (ctx->enc_cd) {
1253                 memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
1254                 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1255                                   ctx->enc_cd, ctx->enc_cd_paddr);
1256         }
1257         if (ctx->dec_cd) {
1258                 memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
1259                 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1260                                   ctx->dec_cd, ctx->dec_cd_paddr);
1261         }
1262         qat_crypto_put_instance(inst);
1263 }
1264
1265 static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
1266 {
1267         crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
1268         return 0;
1269 }
1270
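/*
 * Allocate the software xts(aes) fallback and size the request context
 * so it can hold either a device request or a fallback request.
 */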
1271 static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm)
1272 {
1273         struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1274         int reqsize;
1275
1276         ctx->ftfm = crypto_alloc_skcipher("xts(aes)", 0,
1277                                           CRYPTO_ALG_NEED_FALLBACK);
1278         if (IS_ERR(ctx->ftfm))
1279                 return PTR_ERR(ctx->ftfm);
1280
1281         reqsize = max(sizeof(struct qat_crypto_request),
1282                       sizeof(struct skcipher_request) +
1283                       crypto_skcipher_reqsize(ctx->ftfm));
1284         crypto_skcipher_set_reqsize(tfm, reqsize);
1285
1286         return 0;
1287 }
1288
1289 static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
1290 {
1291         struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1292         struct qat_crypto_instance *inst = ctx->inst;
1293         struct device *dev;
1294
1295         if (!inst)
1296                 return;
1297
1298         dev = &GET_DEV(inst->accel_dev);
1299         if (ctx->enc_cd) {
1300                 memset(ctx->enc_cd, 0,
1301                        sizeof(struct icp_qat_hw_cipher_algo_blk));
1302                 dma_free_coherent(dev,
1303                                   sizeof(struct icp_qat_hw_cipher_algo_blk),
1304                                   ctx->enc_cd, ctx->enc_cd_paddr);
1305         }
1306         if (ctx->dec_cd) {
1307                 memset(ctx->dec_cd, 0,
1308                        sizeof(struct icp_qat_hw_cipher_algo_blk));
1309                 dma_free_coherent(dev,
1310                                   sizeof(struct icp_qat_hw_cipher_algo_blk),
1311                                   ctx->dec_cd, ctx->dec_cd_paddr);
1312         }
1313         qat_crypto_put_instance(inst);
1314 }
1315
1316 static void qat_alg_skcipher_exit_xts_tfm(struct crypto_skcipher *tfm)
1317 {
1318         struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1319
1320         if (ctx->ftfm)
1321                 crypto_free_skcipher(ctx->ftfm);
1322
1323         qat_alg_skcipher_exit_tfm(tfm);
1324 }
1325
1326 static struct aead_alg qat_aeads[] = { {
1327         .base = {
1328                 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1329                 .cra_driver_name = "qat_aes_cbc_hmac_sha1",
1330                 .cra_priority = 4001,
1331                 .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1332                 .cra_blocksize = AES_BLOCK_SIZE,
1333                 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1334                 .cra_module = THIS_MODULE,
1335         },
1336         .init = qat_alg_aead_sha1_init,
1337         .exit = qat_alg_aead_exit,
1338         .setkey = qat_alg_aead_setkey,
1339         .decrypt = qat_alg_aead_dec,
1340         .encrypt = qat_alg_aead_enc,
1341         .ivsize = AES_BLOCK_SIZE,
1342         .maxauthsize = SHA1_DIGEST_SIZE,
1343 }, {
1344         .base = {
1345                 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1346                 .cra_driver_name = "qat_aes_cbc_hmac_sha256",
1347                 .cra_priority = 4001,
1348                 .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1349                 .cra_blocksize = AES_BLOCK_SIZE,
1350                 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1351                 .cra_module = THIS_MODULE,
1352         },
1353         .init = qat_alg_aead_sha256_init,
1354         .exit = qat_alg_aead_exit,
1355         .setkey = qat_alg_aead_setkey,
1356         .decrypt = qat_alg_aead_dec,
1357         .encrypt = qat_alg_aead_enc,
1358         .ivsize = AES_BLOCK_SIZE,
1359         .maxauthsize = SHA256_DIGEST_SIZE,
1360 }, {
1361         .base = {
1362                 .cra_name = "authenc(hmac(sha512),cbc(aes))",
1363                 .cra_driver_name = "qat_aes_cbc_hmac_sha512",
1364                 .cra_priority = 4001,
1365                 .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1366                 .cra_blocksize = AES_BLOCK_SIZE,
1367                 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1368                 .cra_module = THIS_MODULE,
1369         },
1370         .init = qat_alg_aead_sha512_init,
1371         .exit = qat_alg_aead_exit,
1372         .setkey = qat_alg_aead_setkey,
1373         .decrypt = qat_alg_aead_dec,
1374         .encrypt = qat_alg_aead_enc,
1375         .ivsize = AES_BLOCK_SIZE,
1376         .maxauthsize = SHA512_DIGEST_SIZE,
1377 } };
1378
1379 static struct skcipher_alg qat_skciphers[] = { {
1380         .base.cra_name = "cbc(aes)",
1381         .base.cra_driver_name = "qat_aes_cbc",
1382         .base.cra_priority = 4001,
1383         .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1384         .base.cra_blocksize = AES_BLOCK_SIZE,
1385         .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1386         .base.cra_alignmask = 0,
1387         .base.cra_module = THIS_MODULE,
1388
1389         .init = qat_alg_skcipher_init_tfm,
1390         .exit = qat_alg_skcipher_exit_tfm,
1391         .setkey = qat_alg_skcipher_cbc_setkey,
1392         .decrypt = qat_alg_skcipher_blk_decrypt,
1393         .encrypt = qat_alg_skcipher_blk_encrypt,
1394         .min_keysize = AES_MIN_KEY_SIZE,
1395         .max_keysize = AES_MAX_KEY_SIZE,
1396         .ivsize = AES_BLOCK_SIZE,
1397 }, {
1398         .base.cra_name = "ctr(aes)",
1399         .base.cra_driver_name = "qat_aes_ctr",
1400         .base.cra_priority = 4001,
1401         .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1402         .base.cra_blocksize = 1,
1403         .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1404         .base.cra_alignmask = 0,
1405         .base.cra_module = THIS_MODULE,
1406
1407         .init = qat_alg_skcipher_init_tfm,
1408         .exit = qat_alg_skcipher_exit_tfm,
1409         .setkey = qat_alg_skcipher_ctr_setkey,
1410         .decrypt = qat_alg_skcipher_decrypt,
1411         .encrypt = qat_alg_skcipher_encrypt,
1412         .min_keysize = AES_MIN_KEY_SIZE,
1413         .max_keysize = AES_MAX_KEY_SIZE,
1414         .ivsize = AES_BLOCK_SIZE,
1415 }, {
1416         .base.cra_name = "xts(aes)",
1417         .base.cra_driver_name = "qat_aes_xts",
1418         .base.cra_priority = 4001,
1419         .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
1420                           CRYPTO_ALG_ALLOCATES_MEMORY,
1421         .base.cra_blocksize = AES_BLOCK_SIZE,
1422         .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1423         .base.cra_alignmask = 0,
1424         .base.cra_module = THIS_MODULE,
1425
1426         .init = qat_alg_skcipher_init_xts_tfm,
1427         .exit = qat_alg_skcipher_exit_xts_tfm,
1428         .setkey = qat_alg_skcipher_xts_setkey,
1429         .decrypt = qat_alg_skcipher_xts_decrypt,
1430         .encrypt = qat_alg_skcipher_xts_encrypt,
1431         .min_keysize = 2 * AES_MIN_KEY_SIZE,
1432         .max_keysize = 2 * AES_MAX_KEY_SIZE,
1433         .ivsize = AES_BLOCK_SIZE,
1434 } };
1435
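/*
 * The algorithms are registered when the first accelerator comes up and
 * unregistered when the last one goes away; active_devs tracks the count.
 */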
1436 int qat_algs_register(void)
1437 {
1438         int ret = 0;
1439
1440         mutex_lock(&algs_lock);
1441         if (++active_devs != 1)
1442                 goto unlock;
1443
1444         ret = crypto_register_skciphers(qat_skciphers,
1445                                         ARRAY_SIZE(qat_skciphers));
1446         if (ret)
1447                 goto unlock;
1448
1449         ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
1450         if (ret)
1451                 goto unreg_algs;
1452
1453 unlock:
1454         mutex_unlock(&algs_lock);
1455         return ret;
1456
1457 unreg_algs:
1458         crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
1459         goto unlock;
1460 }
1461
1462 void qat_algs_unregister(void)
1463 {
1464         mutex_lock(&algs_lock);
1465         if (--active_devs != 0)
1466                 goto unlock;
1467
1468         crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
1469         crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
1470
1471 unlock:
1472         mutex_unlock(&algs_lock);
1473 }