crypto: qat - add AES-CTR support for QAT GEN4 devices
drivers/crypto/qat/qat_common/qat_algs.c
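This change enables AES-CTR offload on QAT GEN4 devices. On parts that advertise ICP_ACCEL_CAPABILITIES_AES_V2, qat_alg_skcipher_init_com() below steers CTR requests to the UCS cipher slice and pads the key to a 16-byte multiple in the content descriptor; CTR decryption reuses the encrypt cipher configuration, since the CTR operation is symmetric.

A minimal, hypothetical sketch (not part of this file) of how a kernel caller could exercise the registered "ctr(aes)" transform through the standard crypto_skcipher API follows; example_ctr_encrypt() and its parameters are illustrative only, and allocating by algorithm name simply selects the highest-priority "ctr(aes)" implementation available:

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int example_ctr_encrypt(const u8 *key, unsigned int keylen,
                               u8 *iv, void *data, unsigned int len)
{
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        struct scatterlist sg;
        DECLARE_CRYPTO_WAIT(wait);
        int ret;

        /* Pick the best available ctr(aes) implementation */
        tfm = crypto_alloc_skcipher("ctr(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_skcipher_setkey(tfm, key, keylen);
        if (ret)
                goto out_free_tfm;

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto out_free_tfm;
        }

        /* In-place encryption of a linear buffer, waiting for completion */
        sg_init_one(&sg, data, len);
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                      CRYPTO_TFM_REQ_MAY_SLEEP,
                                      crypto_req_done, &wait);
        skcipher_request_set_crypt(req, &sg, &sg, len, iv);
        ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

        skcipher_request_free(req);
out_free_tfm:
        crypto_free_skcipher(tfm);
        return ret;
}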
1 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
2 /* Copyright(c) 2014 - 2020 Intel Corporation */
3 #include <linux/module.h>
4 #include <linux/slab.h>
5 #include <linux/crypto.h>
6 #include <crypto/internal/aead.h>
7 #include <crypto/internal/skcipher.h>
8 #include <crypto/aes.h>
9 #include <crypto/sha1.h>
10 #include <crypto/sha2.h>
11 #include <crypto/hash.h>
12 #include <crypto/hmac.h>
13 #include <crypto/algapi.h>
14 #include <crypto/authenc.h>
15 #include <crypto/scatterwalk.h>
16 #include <crypto/xts.h>
17 #include <linux/dma-mapping.h>
18 #include "adf_accel_devices.h"
19 #include "adf_transport.h"
20 #include "adf_common_drv.h"
21 #include "qat_crypto.h"
22 #include "icp_qat_hw.h"
23 #include "icp_qat_fw.h"
24 #include "icp_qat_fw_la.h"
25
26 #define QAT_AES_HW_CONFIG_ENC(alg, mode) \
27         ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
28                                        ICP_QAT_HW_CIPHER_NO_CONVERT, \
29                                        ICP_QAT_HW_CIPHER_ENCRYPT)
30
31 #define QAT_AES_HW_CONFIG_DEC(alg, mode) \
32         ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
33                                        ICP_QAT_HW_CIPHER_KEY_CONVERT, \
34                                        ICP_QAT_HW_CIPHER_DECRYPT)
35
36 #define HW_CAP_AES_V2(accel_dev) \
37         (GET_HW_DATA(accel_dev)->accel_capabilities_mask & \
38          ICP_ACCEL_CAPABILITIES_AES_V2)
39
40 static DEFINE_MUTEX(algs_lock);
41 static unsigned int active_devs;
42
43 struct qat_alg_buf {
44         u32 len;
45         u32 resrvd;
46         u64 addr;
47 } __packed;
48
49 struct qat_alg_buf_list {
50         u64 resrvd;
51         u32 num_bufs;
52         u32 num_mapped_bufs;
53         struct qat_alg_buf bufers[];
54 } __packed __aligned(64);
55
56 /* Common content descriptor */
57 struct qat_alg_cd {
58         union {
59                 struct qat_enc { /* Encrypt content desc */
60                         struct icp_qat_hw_cipher_algo_blk cipher;
61                         struct icp_qat_hw_auth_algo_blk hash;
62                 } qat_enc_cd;
63                 struct qat_dec { /* Decrypt content desc */
64                         struct icp_qat_hw_auth_algo_blk hash;
65                         struct icp_qat_hw_cipher_algo_blk cipher;
66                 } qat_dec_cd;
67         };
68 } __aligned(64);
69
70 struct qat_alg_aead_ctx {
71         struct qat_alg_cd *enc_cd;
72         struct qat_alg_cd *dec_cd;
73         dma_addr_t enc_cd_paddr;
74         dma_addr_t dec_cd_paddr;
75         struct icp_qat_fw_la_bulk_req enc_fw_req;
76         struct icp_qat_fw_la_bulk_req dec_fw_req;
77         struct crypto_shash *hash_tfm;
78         enum icp_qat_hw_auth_algo qat_hash_alg;
79         struct qat_crypto_instance *inst;
80         union {
81                 struct sha1_state sha1;
82                 struct sha256_state sha256;
83                 struct sha512_state sha512;
84         };
85         char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
86         char opad[SHA512_BLOCK_SIZE];
87 };
88
89 struct qat_alg_skcipher_ctx {
90         struct icp_qat_hw_cipher_algo_blk *enc_cd;
91         struct icp_qat_hw_cipher_algo_blk *dec_cd;
92         dma_addr_t enc_cd_paddr;
93         dma_addr_t dec_cd_paddr;
94         struct icp_qat_fw_la_bulk_req enc_fw_req;
95         struct icp_qat_fw_la_bulk_req dec_fw_req;
96         struct qat_crypto_instance *inst;
97         struct crypto_skcipher *ftfm;
98         bool fallback;
99         int mode;
100 };
101
102 static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
103 {
104         switch (qat_hash_alg) {
105         case ICP_QAT_HW_AUTH_ALGO_SHA1:
106                 return ICP_QAT_HW_SHA1_STATE1_SZ;
107         case ICP_QAT_HW_AUTH_ALGO_SHA256:
108                 return ICP_QAT_HW_SHA256_STATE1_SZ;
109         case ICP_QAT_HW_AUTH_ALGO_SHA512:
110                 return ICP_QAT_HW_SHA512_STATE1_SZ;
111         default:
112                 return -EFAULT;
113         }
114         return -EFAULT;
115 }
116
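/*
 * Compute the HMAC inner and outer hash states for the authentication key
 * and store them in the hardware auth block (hash->sha.state1), the outer
 * state following the inner one at an 8-byte aligned offset. The ipad/opad
 * scratch buffers are wiped with memzero_explicit() once the states have
 * been exported.
 */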
117 static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
118                                   struct qat_alg_aead_ctx *ctx,
119                                   const u8 *auth_key,
120                                   unsigned int auth_keylen)
121 {
122         SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
123         int block_size = crypto_shash_blocksize(ctx->hash_tfm);
124         int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
125         __be32 *hash_state_out;
126         __be64 *hash512_state_out;
127         int i, offset;
128
129         memset(ctx->ipad, 0, block_size);
130         memset(ctx->opad, 0, block_size);
131         shash->tfm = ctx->hash_tfm;
132
133         if (auth_keylen > block_size) {
134                 int ret = crypto_shash_digest(shash, auth_key,
135                                               auth_keylen, ctx->ipad);
136                 if (ret)
137                         return ret;
138
139                 memcpy(ctx->opad, ctx->ipad, digest_size);
140         } else {
141                 memcpy(ctx->ipad, auth_key, auth_keylen);
142                 memcpy(ctx->opad, auth_key, auth_keylen);
143         }
144
145         for (i = 0; i < block_size; i++) {
146                 char *ipad_ptr = ctx->ipad + i;
147                 char *opad_ptr = ctx->opad + i;
148                 *ipad_ptr ^= HMAC_IPAD_VALUE;
149                 *opad_ptr ^= HMAC_OPAD_VALUE;
150         }
151
152         if (crypto_shash_init(shash))
153                 return -EFAULT;
154
155         if (crypto_shash_update(shash, ctx->ipad, block_size))
156                 return -EFAULT;
157
158         hash_state_out = (__be32 *)hash->sha.state1;
159         hash512_state_out = (__be64 *)hash_state_out;
160
161         switch (ctx->qat_hash_alg) {
162         case ICP_QAT_HW_AUTH_ALGO_SHA1:
163                 if (crypto_shash_export(shash, &ctx->sha1))
164                         return -EFAULT;
165                 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
166                         *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
167                 break;
168         case ICP_QAT_HW_AUTH_ALGO_SHA256:
169                 if (crypto_shash_export(shash, &ctx->sha256))
170                         return -EFAULT;
171                 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
172                         *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
173                 break;
174         case ICP_QAT_HW_AUTH_ALGO_SHA512:
175                 if (crypto_shash_export(shash, &ctx->sha512))
176                         return -EFAULT;
177                 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
178                         *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
179                 break;
180         default:
181                 return -EFAULT;
182         }
183
184         if (crypto_shash_init(shash))
185                 return -EFAULT;
186
187         if (crypto_shash_update(shash, ctx->opad, block_size))
188                 return -EFAULT;
189
190         offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
191         if (offset < 0)
192                 return -EFAULT;
193
194         hash_state_out = (__be32 *)(hash->sha.state1 + offset);
195         hash512_state_out = (__be64 *)hash_state_out;
196
197         switch (ctx->qat_hash_alg) {
198         case ICP_QAT_HW_AUTH_ALGO_SHA1:
199                 if (crypto_shash_export(shash, &ctx->sha1))
200                         return -EFAULT;
201                 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
202                         *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
203                 break;
204         case ICP_QAT_HW_AUTH_ALGO_SHA256:
205                 if (crypto_shash_export(shash, &ctx->sha256))
206                         return -EFAULT;
207                 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
208                         *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
209                 break;
210         case ICP_QAT_HW_AUTH_ALGO_SHA512:
211                 if (crypto_shash_export(shash, &ctx->sha512))
212                         return -EFAULT;
213                 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
214                         *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
215                 break;
216         default:
217                 return -EFAULT;
218         }
219         memzero_explicit(ctx->ipad, block_size);
220         memzero_explicit(ctx->opad, block_size);
221         return 0;
222 }
223
224 static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
225 {
226         header->hdr_flags =
227                 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
228         header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
229         header->comn_req_flags =
230                 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
231                                             QAT_COMN_PTR_TYPE_SGL);
232         ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
233                                   ICP_QAT_FW_LA_PARTIAL_NONE);
234         ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
235                                            ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
236         ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
237                                 ICP_QAT_FW_LA_NO_PROTO);
238         ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
239                                        ICP_QAT_FW_LA_NO_UPDATE_STATE);
240 }
241
242 static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
243                                          int alg,
244                                          struct crypto_authenc_keys *keys,
245                                          int mode)
246 {
247         struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
248         unsigned int digestsize = crypto_aead_authsize(aead_tfm);
249         struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
250         struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
251         struct icp_qat_hw_auth_algo_blk *hash =
252                 (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
253                 sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
254         struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
255         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
256         struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
257         void *ptr = &req_tmpl->cd_ctrl;
258         struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
259         struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
260
261         /* CD setup */
262         cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
263         memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
264         hash->sha.inner_setup.auth_config.config =
265                 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
266                                              ctx->qat_hash_alg, digestsize);
267         hash->sha.inner_setup.auth_counter.counter =
268                 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
269
270         if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
271                 return -EFAULT;
272
273         /* Request setup */
274         qat_alg_init_common_hdr(header);
275         header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
276         ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
277                                            ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
278         ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
279                                    ICP_QAT_FW_LA_RET_AUTH_RES);
280         ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
281                                    ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
282         cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
283         cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
284
285         /* Cipher CD config setup */
286         cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
287         cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
288         cipher_cd_ctrl->cipher_cfg_offset = 0;
289         ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
290         ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
291         /* Auth CD config setup */
292         hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
293         hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
294         hash_cd_ctrl->inner_res_sz = digestsize;
295         hash_cd_ctrl->final_sz = digestsize;
296
297         switch (ctx->qat_hash_alg) {
298         case ICP_QAT_HW_AUTH_ALGO_SHA1:
299                 hash_cd_ctrl->inner_state1_sz =
300                         round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
301                 hash_cd_ctrl->inner_state2_sz =
302                         round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
303                 break;
304         case ICP_QAT_HW_AUTH_ALGO_SHA256:
305                 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
306                 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
307                 break;
308         case ICP_QAT_HW_AUTH_ALGO_SHA512:
309                 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
310                 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
311                 break;
312         default:
313                 break;
314         }
315         hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
316                         ((sizeof(struct icp_qat_hw_auth_setup) +
317                          round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
318         ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
319         ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
320         return 0;
321 }
322
323 static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
324                                          int alg,
325                                          struct crypto_authenc_keys *keys,
326                                          int mode)
327 {
328         struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
329         unsigned int digestsize = crypto_aead_authsize(aead_tfm);
330         struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
331         struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
332         struct icp_qat_hw_cipher_algo_blk *cipher =
333                 (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
334                 sizeof(struct icp_qat_hw_auth_setup) +
335                 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
336         struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
337         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
338         struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
339         void *ptr = &req_tmpl->cd_ctrl;
340         struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
341         struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
342         struct icp_qat_fw_la_auth_req_params *auth_param =
343                 (struct icp_qat_fw_la_auth_req_params *)
344                 ((char *)&req_tmpl->serv_specif_rqpars +
345                 sizeof(struct icp_qat_fw_la_cipher_req_params));
346
347         /* CD setup */
348         cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
349         memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
350         hash->sha.inner_setup.auth_config.config =
351                 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
352                                              ctx->qat_hash_alg,
353                                              digestsize);
354         hash->sha.inner_setup.auth_counter.counter =
355                 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
356
357         if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
358                 return -EFAULT;
359
360         /* Request setup */
361         qat_alg_init_common_hdr(header);
362         header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
363         ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
364                                            ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
365         ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
366                                    ICP_QAT_FW_LA_NO_RET_AUTH_RES);
367         ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
368                                    ICP_QAT_FW_LA_CMP_AUTH_RES);
369         cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
370         cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
371
372         /* Cipher CD config setup */
373         cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
374         cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
375         cipher_cd_ctrl->cipher_cfg_offset =
376                 (sizeof(struct icp_qat_hw_auth_setup) +
377                  roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
378         ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
379         ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
380
381         /* Auth CD config setup */
382         hash_cd_ctrl->hash_cfg_offset = 0;
383         hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
384         hash_cd_ctrl->inner_res_sz = digestsize;
385         hash_cd_ctrl->final_sz = digestsize;
386
387         switch (ctx->qat_hash_alg) {
388         case ICP_QAT_HW_AUTH_ALGO_SHA1:
389                 hash_cd_ctrl->inner_state1_sz =
390                         round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
391                 hash_cd_ctrl->inner_state2_sz =
392                         round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
393                 break;
394         case ICP_QAT_HW_AUTH_ALGO_SHA256:
395                 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
396                 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
397                 break;
398         case ICP_QAT_HW_AUTH_ALGO_SHA512:
399                 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
400                 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
401                 break;
402         default:
403                 break;
404         }
405
406         hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
407                         ((sizeof(struct icp_qat_hw_auth_setup) +
408                          round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
409         auth_param->auth_res_sz = digestsize;
410         ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
411         ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
412         return 0;
413 }
414
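/*
 * Fill in the parts of the skcipher request template and content descriptor
 * that are common to encryption and decryption. On AES_V2 capable (GEN4)
 * devices, AES-CTR is routed to the UCS cipher slice and the key is padded
 * to a multiple of 16 bytes; all other cases use the legacy cipher slice.
 */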
415 static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
416                                       struct icp_qat_fw_la_bulk_req *req,
417                                       struct icp_qat_hw_cipher_algo_blk *cd,
418                                       const u8 *key, unsigned int keylen)
419 {
420         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
421         struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
422         struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
423         bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
424         int mode = ctx->mode;
425
426         qat_alg_init_common_hdr(header);
427         header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
428         cd_pars->u.s.content_desc_params_sz =
429                                 sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
430
431         if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
432                 ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
433                                              ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
434                 keylen = round_up(keylen, 16);
435                 memcpy(cd->ucs_aes.key, key, keylen);
436         } else {
437                 memcpy(cd->aes.key, key, keylen);
438         }
439
440         /* Cipher CD config setup */
441         cd_ctrl->cipher_key_sz = keylen >> 3;
442         cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
443         cd_ctrl->cipher_cfg_offset = 0;
444         ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
445         ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
446 }
447
448 static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
449                                       int alg, const u8 *key,
450                                       unsigned int keylen, int mode)
451 {
452         struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
453         struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
454         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
455
456         qat_alg_skcipher_init_com(ctx, req, enc_cd, key, keylen);
457         cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
458         enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
459 }
460
461 static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
462                                       int alg, const u8 *key,
463                                       unsigned int keylen, int mode)
464 {
465         struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
466         struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
467         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
468
469         qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen);
470         cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
471
472         if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
473                 dec_cd->aes.cipher_config.val =
474                                         QAT_AES_HW_CONFIG_DEC(alg, mode);
475         else
476                 dec_cd->aes.cipher_config.val =
477                                         QAT_AES_HW_CONFIG_ENC(alg, mode);
478 }
479
480 static int qat_alg_validate_key(int key_len, int *alg, int mode)
481 {
482         if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
483                 switch (key_len) {
484                 case AES_KEYSIZE_128:
485                         *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
486                         break;
487                 case AES_KEYSIZE_192:
488                         *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
489                         break;
490                 case AES_KEYSIZE_256:
491                         *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
492                         break;
493                 default:
494                         return -EINVAL;
495                 }
496         } else {
497                 switch (key_len) {
498                 case AES_KEYSIZE_128 << 1:
499                         *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
500                         break;
501                 case AES_KEYSIZE_256 << 1:
502                         *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
503                         break;
504                 default:
505                         return -EINVAL;
506                 }
507         }
508         return 0;
509 }
510
511 static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
512                                       unsigned int keylen,  int mode)
513 {
514         struct crypto_authenc_keys keys;
515         int alg;
516
517         if (crypto_authenc_extractkeys(&keys, key, keylen))
518                 goto bad_key;
519
520         if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
521                 goto bad_key;
522
523         if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
524                 goto error;
525
526         if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
527                 goto error;
528
529         memzero_explicit(&keys, sizeof(keys));
530         return 0;
531 bad_key:
532         memzero_explicit(&keys, sizeof(keys));
533         return -EINVAL;
534 error:
535         memzero_explicit(&keys, sizeof(keys));
536         return -EFAULT;
537 }
538
539 static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
540                                           const u8 *key,
541                                           unsigned int keylen,
542                                           int mode)
543 {
544         int alg;
545
546         if (qat_alg_validate_key(keylen, &alg, mode))
547                 return -EINVAL;
548
549         qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
550         qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
551         return 0;
552 }
553
554 static int qat_alg_aead_rekey(struct crypto_aead *tfm, const u8 *key,
555                               unsigned int keylen)
556 {
557         struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
558
559         memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
560         memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
561         memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
562         memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
563
564         return qat_alg_aead_init_sessions(tfm, key, keylen,
565                                           ICP_QAT_HW_CIPHER_CBC_MODE);
566 }
567
568 static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key,
569                                unsigned int keylen)
570 {
571         struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
572         struct qat_crypto_instance *inst = NULL;
573         int node = get_current_node();
574         struct device *dev;
575         int ret;
576
577         inst = qat_crypto_get_instance_node(node);
578         if (!inst)
579                 return -EINVAL;
580         dev = &GET_DEV(inst->accel_dev);
581         ctx->inst = inst;
582         ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
583                                          &ctx->enc_cd_paddr,
584                                          GFP_ATOMIC);
585         if (!ctx->enc_cd) {
586                 ret = -ENOMEM;
587                 goto out_free_inst;
588         }
589         ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
590                                          &ctx->dec_cd_paddr,
591                                          GFP_ATOMIC);
592         if (!ctx->dec_cd) {
593                 ret = -ENOMEM;
594                 goto out_free_enc;
595         }
596
597         ret = qat_alg_aead_init_sessions(tfm, key, keylen,
598                                          ICP_QAT_HW_CIPHER_CBC_MODE);
599         if (ret)
600                 goto out_free_all;
601
602         return 0;
603
604 out_free_all:
605         memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
606         dma_free_coherent(dev, sizeof(struct qat_alg_cd),
607                           ctx->dec_cd, ctx->dec_cd_paddr);
608         ctx->dec_cd = NULL;
609 out_free_enc:
610         memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
611         dma_free_coherent(dev, sizeof(struct qat_alg_cd),
612                           ctx->enc_cd, ctx->enc_cd_paddr);
613         ctx->enc_cd = NULL;
614 out_free_inst:
615         ctx->inst = NULL;
616         qat_crypto_put_instance(inst);
617         return ret;
618 }
619
620 static int qat_alg_aead_setkey(struct crypto_aead *tfm, const u8 *key,
621                                unsigned int keylen)
622 {
623         struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
624
625         if (ctx->enc_cd)
626                 return qat_alg_aead_rekey(tfm, key, keylen);
627         else
628                 return qat_alg_aead_newkey(tfm, key, keylen);
629 }
630
631 static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
632                               struct qat_crypto_request *qat_req)
633 {
634         struct device *dev = &GET_DEV(inst->accel_dev);
635         struct qat_alg_buf_list *bl = qat_req->buf.bl;
636         struct qat_alg_buf_list *blout = qat_req->buf.blout;
637         dma_addr_t blp = qat_req->buf.blp;
638         dma_addr_t blpout = qat_req->buf.bloutp;
639         size_t sz = qat_req->buf.sz;
640         size_t sz_out = qat_req->buf.sz_out;
641         int i;
642
643         for (i = 0; i < bl->num_bufs; i++)
644                 dma_unmap_single(dev, bl->bufers[i].addr,
645                                  bl->bufers[i].len, DMA_BIDIRECTIONAL);
646
647         dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
648         kfree(bl);
649         if (blp != blpout) {
650                 /* For out-of-place operations, unmap only the data buffers */
651                 int bufless = blout->num_bufs - blout->num_mapped_bufs;
652
653                 for (i = bufless; i < blout->num_bufs; i++) {
654                         dma_unmap_single(dev, blout->bufers[i].addr,
655                                          blout->bufers[i].len,
656                                          DMA_BIDIRECTIONAL);
657                 }
658                 dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
659                 kfree(blout);
660         }
661 }
662
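/*
 * Map the source and, for out-of-place requests, the destination scatterlist
 * into the flat qat_alg_buf_list layout expected by the firmware. In-place
 * requests reuse the source buffer list for the output.
 */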
663 static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
664                                struct scatterlist *sgl,
665                                struct scatterlist *sglout,
666                                struct qat_crypto_request *qat_req)
667 {
668         struct device *dev = &GET_DEV(inst->accel_dev);
669         int i, sg_nctr = 0;
670         int n = sg_nents(sgl);
671         struct qat_alg_buf_list *bufl;
672         struct qat_alg_buf_list *buflout = NULL;
673         dma_addr_t blp;
674         dma_addr_t bloutp = 0;
675         struct scatterlist *sg;
676         size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
677
678         if (unlikely(!n))
679                 return -EINVAL;
680
681         bufl = kzalloc_node(sz, GFP_ATOMIC,
682                             dev_to_node(&GET_DEV(inst->accel_dev)));
683         if (unlikely(!bufl))
684                 return -ENOMEM;
685
686         blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
687         if (unlikely(dma_mapping_error(dev, blp)))
688                 goto err_in;
689
690         for_each_sg(sgl, sg, n, i) {
691                 int y = sg_nctr;
692
693                 if (!sg->length)
694                         continue;
695
696                 bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
697                                                       sg->length,
698                                                       DMA_BIDIRECTIONAL);
699                 bufl->bufers[y].len = sg->length;
700                 if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
701                         goto err_in;
702                 sg_nctr++;
703         }
704         bufl->num_bufs = sg_nctr;
705         qat_req->buf.bl = bufl;
706         qat_req->buf.blp = blp;
707         qat_req->buf.sz = sz;
708         /* Handle out of place operation */
709         if (sgl != sglout) {
710                 struct qat_alg_buf *bufers;
711
712                 n = sg_nents(sglout);
713                 sz_out = struct_size(buflout, bufers, n + 1);
714                 sg_nctr = 0;
715                 buflout = kzalloc_node(sz_out, GFP_ATOMIC,
716                                        dev_to_node(&GET_DEV(inst->accel_dev)));
717                 if (unlikely(!buflout))
718                         goto err_in;
719                 bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
720                 if (unlikely(dma_mapping_error(dev, bloutp)))
721                         goto err_out;
722                 bufers = buflout->bufers;
723                 for_each_sg(sglout, sg, n, i) {
724                         int y = sg_nctr;
725
726                         if (!sg->length)
727                                 continue;
728
729                         bufers[y].addr = dma_map_single(dev, sg_virt(sg),
730                                                         sg->length,
731                                                         DMA_BIDIRECTIONAL);
732                         if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
733                                 goto err_out;
734                         bufers[y].len = sg->length;
735                         sg_nctr++;
736                 }
737                 buflout->num_bufs = sg_nctr;
738                 buflout->num_mapped_bufs = sg_nctr;
739                 qat_req->buf.blout = buflout;
740                 qat_req->buf.bloutp = bloutp;
741                 qat_req->buf.sz_out = sz_out;
742         } else {
743                 /* Otherwise set the src and dst to the same address */
744                 qat_req->buf.bloutp = qat_req->buf.blp;
745                 qat_req->buf.sz_out = 0;
746         }
747         return 0;
748
749 err_out:
750         n = sg_nents(sglout);
751         for (i = 0; i < n; i++)
752                 if (!dma_mapping_error(dev, buflout->bufers[i].addr))
753                         dma_unmap_single(dev, buflout->bufers[i].addr,
754                                          buflout->bufers[i].len,
755                                          DMA_BIDIRECTIONAL);
756         if (!dma_mapping_error(dev, bloutp))
757                 dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
758         kfree(buflout);
759
760 err_in:
761         n = sg_nents(sgl);
762         for (i = 0; i < n; i++)
763                 if (!dma_mapping_error(dev, bufl->bufers[i].addr))
764                         dma_unmap_single(dev, bufl->bufers[i].addr,
765                                          bufl->bufers[i].len,
766                                          DMA_BIDIRECTIONAL);
767
768         if (!dma_mapping_error(dev, blp))
769                 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
770         kfree(bufl);
771
772         dev_err(dev, "Failed to map buf for dma\n");
773         return -ENOMEM;
774 }
775
776 static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
777                                   struct qat_crypto_request *qat_req)
778 {
779         struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
780         struct qat_crypto_instance *inst = ctx->inst;
781         struct aead_request *areq = qat_req->aead_req;
782         u8 stat_field = qat_resp->comn_resp.comn_status;
783         int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
784
785         qat_alg_free_bufl(inst, qat_req);
786         if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
787                 res = -EBADMSG;
788         areq->base.complete(&areq->base, res);
789 }
790
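/*
 * CTR mode: advance the IV by the number of AES blocks processed, treating
 * it as a 128-bit big-endian counter (carrying from the low into the high
 * 64 bits), so that a subsequent request continues the keystream where this
 * one left off.
 */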
791 static void qat_alg_update_iv_ctr_mode(struct qat_crypto_request *qat_req)
792 {
793         struct skcipher_request *sreq = qat_req->skcipher_req;
794         u64 iv_lo_prev;
795         u64 iv_lo;
796         u64 iv_hi;
797
798         memcpy(qat_req->iv, sreq->iv, AES_BLOCK_SIZE);
799
800         iv_lo = be64_to_cpu(qat_req->iv_lo);
801         iv_hi = be64_to_cpu(qat_req->iv_hi);
802
803         iv_lo_prev = iv_lo;
804         iv_lo += DIV_ROUND_UP(sreq->cryptlen, AES_BLOCK_SIZE);
805         if (iv_lo < iv_lo_prev)
806                 iv_hi++;
807
808         qat_req->iv_lo = cpu_to_be64(iv_lo);
809         qat_req->iv_hi = cpu_to_be64(iv_hi);
810 }
811
812 static void qat_alg_update_iv_cbc_mode(struct qat_crypto_request *qat_req)
813 {
814         struct skcipher_request *sreq = qat_req->skcipher_req;
815         int offset = sreq->cryptlen - AES_BLOCK_SIZE;
816         struct scatterlist *sgl;
817
818         if (qat_req->encryption)
819                 sgl = sreq->dst;
820         else
821                 sgl = sreq->src;
822
823         scatterwalk_map_and_copy(qat_req->iv, sgl, offset, AES_BLOCK_SIZE, 0);
824 }
825
826 static void qat_alg_update_iv(struct qat_crypto_request *qat_req)
827 {
828         struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
829         struct device *dev = &GET_DEV(ctx->inst->accel_dev);
830
831         switch (ctx->mode) {
832         case ICP_QAT_HW_CIPHER_CTR_MODE:
833                 qat_alg_update_iv_ctr_mode(qat_req);
834                 break;
835         case ICP_QAT_HW_CIPHER_CBC_MODE:
836                 qat_alg_update_iv_cbc_mode(qat_req);
837                 break;
838         case ICP_QAT_HW_CIPHER_XTS_MODE:
839                 break;
840         default:
841                 dev_warn(dev, "Unsupported IV update for cipher mode %d\n",
842                          ctx->mode);
843         }
844 }
845
846 static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
847                                       struct qat_crypto_request *qat_req)
848 {
849         struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
850         struct qat_crypto_instance *inst = ctx->inst;
851         struct skcipher_request *sreq = qat_req->skcipher_req;
852         u8 stat_field = qat_resp->comn_resp.comn_status;
853         int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
854
855         qat_alg_free_bufl(inst, qat_req);
856         if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
857                 res = -EINVAL;
858
859         if (qat_req->encryption)
860                 qat_alg_update_iv(qat_req);
861
862         memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);
863
864         sreq->base.complete(&sreq->base, res);
865 }
866
867 void qat_alg_callback(void *resp)
868 {
869         struct icp_qat_fw_la_resp *qat_resp = resp;
870         struct qat_crypto_request *qat_req =
871                                 (void *)(__force long)qat_resp->opaque_data;
872
873         qat_req->cb(qat_resp, qat_req);
874 }
875
876 static int qat_alg_aead_dec(struct aead_request *areq)
877 {
878         struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
879         struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
880         struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
881         struct qat_crypto_request *qat_req = aead_request_ctx(areq);
882         struct icp_qat_fw_la_cipher_req_params *cipher_param;
883         struct icp_qat_fw_la_auth_req_params *auth_param;
884         struct icp_qat_fw_la_bulk_req *msg;
885         int digst_size = crypto_aead_authsize(aead_tfm);
886         int ret, ctr = 0;
887         u32 cipher_len;
888
889         cipher_len = areq->cryptlen - digst_size;
890         if (cipher_len % AES_BLOCK_SIZE != 0)
891                 return -EINVAL;
892
893         ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
894         if (unlikely(ret))
895                 return ret;
896
897         msg = &qat_req->req;
898         *msg = ctx->dec_fw_req;
899         qat_req->aead_ctx = ctx;
900         qat_req->aead_req = areq;
901         qat_req->cb = qat_aead_alg_callback;
902         qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
903         qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
904         qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
905         cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
906         cipher_param->cipher_length = cipher_len;
907         cipher_param->cipher_offset = areq->assoclen;
908         memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
909         auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
910         auth_param->auth_off = 0;
911         auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
912         do {
913                 ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
914         } while (ret == -EAGAIN && ctr++ < 10);
915
916         if (ret == -EAGAIN) {
917                 qat_alg_free_bufl(ctx->inst, qat_req);
918                 return -EBUSY;
919         }
920         return -EINPROGRESS;
921 }
922
923 static int qat_alg_aead_enc(struct aead_request *areq)
924 {
925         struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
926         struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
927         struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
928         struct qat_crypto_request *qat_req = aead_request_ctx(areq);
929         struct icp_qat_fw_la_cipher_req_params *cipher_param;
930         struct icp_qat_fw_la_auth_req_params *auth_param;
931         struct icp_qat_fw_la_bulk_req *msg;
932         u8 *iv = areq->iv;
933         int ret, ctr = 0;
934
935         if (areq->cryptlen % AES_BLOCK_SIZE != 0)
936                 return -EINVAL;
937
938         ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
939         if (unlikely(ret))
940                 return ret;
941
942         msg = &qat_req->req;
943         *msg = ctx->enc_fw_req;
944         qat_req->aead_ctx = ctx;
945         qat_req->aead_req = areq;
946         qat_req->cb = qat_aead_alg_callback;
947         qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
948         qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
949         qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
950         cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
951         auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
952
953         memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
954         cipher_param->cipher_length = areq->cryptlen;
955         cipher_param->cipher_offset = areq->assoclen;
956
957         auth_param->auth_off = 0;
958         auth_param->auth_len = areq->assoclen + areq->cryptlen;
959
960         do {
961                 ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
962         } while (ret == -EAGAIN && ctr++ < 10);
963
964         if (ret == -EAGAIN) {
965                 qat_alg_free_bufl(ctx->inst, qat_req);
966                 return -EBUSY;
967         }
968         return -EINPROGRESS;
969 }
970
971 static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
972                                   const u8 *key, unsigned int keylen,
973                                   int mode)
974 {
975         memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
976         memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
977         memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
978         memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
979
980         return qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
981 }
982
983 static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx,
984                                    const u8 *key, unsigned int keylen,
985                                    int mode)
986 {
987         struct qat_crypto_instance *inst = NULL;
988         struct device *dev;
989         int node = get_current_node();
990         int ret;
991
992         inst = qat_crypto_get_instance_node(node);
993         if (!inst)
994                 return -EINVAL;
995         dev = &GET_DEV(inst->accel_dev);
996         ctx->inst = inst;
997         ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
998                                          &ctx->enc_cd_paddr,
999                                          GFP_ATOMIC);
1000         if (!ctx->enc_cd) {
1001                 ret = -ENOMEM;
1002                 goto out_free_instance;
1003         }
1004         ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
1005                                          &ctx->dec_cd_paddr,
1006                                          GFP_ATOMIC);
1007         if (!ctx->dec_cd) {
1008                 ret = -ENOMEM;
1009                 goto out_free_enc;
1010         }
1011
1012         ret = qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
1013         if (ret)
1014                 goto out_free_all;
1015
1016         return 0;
1017
1018 out_free_all:
1019         memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
1020         dma_free_coherent(dev, sizeof(*ctx->dec_cd),
1021                           ctx->dec_cd, ctx->dec_cd_paddr);
1022         ctx->dec_cd = NULL;
1023 out_free_enc:
1024         memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
1025         dma_free_coherent(dev, sizeof(*ctx->enc_cd),
1026                           ctx->enc_cd, ctx->enc_cd_paddr);
1027         ctx->enc_cd = NULL;
1028 out_free_instance:
1029         ctx->inst = NULL;
1030         qat_crypto_put_instance(inst);
1031         return ret;
1032 }
1033
1034 static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm,
1035                                    const u8 *key, unsigned int keylen,
1036                                    int mode)
1037 {
1038         struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1039
1040         ctx->mode = mode;
1041
1042         if (ctx->enc_cd)
1043                 return qat_alg_skcipher_rekey(ctx, key, keylen, mode);
1044         else
1045                 return qat_alg_skcipher_newkey(ctx, key, keylen, mode);
1046 }
1047
1048 static int qat_alg_skcipher_cbc_setkey(struct crypto_skcipher *tfm,
1049                                        const u8 *key, unsigned int keylen)
1050 {
1051         return qat_alg_skcipher_setkey(tfm, key, keylen,
1052                                        ICP_QAT_HW_CIPHER_CBC_MODE);
1053 }
1054
1055 static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
1056                                        const u8 *key, unsigned int keylen)
1057 {
1058         return qat_alg_skcipher_setkey(tfm, key, keylen,
1059                                        ICP_QAT_HW_CIPHER_CTR_MODE);
1060 }
1061
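/*
 * The driver handles only 128- and 256-bit XTS keys (see
 * qat_alg_validate_key()); AES-192-XTS keys are handed to the software
 * xts(aes) fallback transform instead.
 */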
1062 static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
1063                                        const u8 *key, unsigned int keylen)
1064 {
1065         struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1066         int ret;
1067
1068         ret = xts_verify_key(tfm, key, keylen);
1069         if (ret)
1070                 return ret;
1071
1072         if (keylen >> 1 == AES_KEYSIZE_192) {
1073                 ret = crypto_skcipher_setkey(ctx->ftfm, key, keylen);
1074                 if (ret)
1075                         return ret;
1076
1077                 ctx->fallback = true;
1078
1079                 return 0;
1080         }
1081
1082         ctx->fallback = false;
1083
1084         return qat_alg_skcipher_setkey(tfm, key, keylen,
1085                                        ICP_QAT_HW_CIPHER_XTS_MODE);
1086 }
1087
1088 static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
1089 {
1090         struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1091         struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
1092         struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1093         struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
1094         struct icp_qat_fw_la_cipher_req_params *cipher_param;
1095         struct icp_qat_fw_la_bulk_req *msg;
1096         int ret, ctr = 0;
1097
1098         if (req->cryptlen == 0)
1099                 return 0;
1100
1101         ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
1102         if (unlikely(ret))
1103                 return ret;
1104
1105         msg = &qat_req->req;
1106         *msg = ctx->enc_fw_req;
1107         qat_req->skcipher_ctx = ctx;
1108         qat_req->skcipher_req = req;
1109         qat_req->cb = qat_skcipher_alg_callback;
1110         qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
1111         qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1112         qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1113         qat_req->encryption = true;
1114         cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1115         cipher_param->cipher_length = req->cryptlen;
1116         cipher_param->cipher_offset = 0;
1117         memcpy(cipher_param->u.cipher_IV_array, req->iv, AES_BLOCK_SIZE);
1118
1119         do {
1120                 ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
1121         } while (ret == -EAGAIN && ctr++ < 10);
1122
1123         if (ret == -EAGAIN) {
1124                 qat_alg_free_bufl(ctx->inst, qat_req);
1125                 return -EBUSY;
1126         }
1127         return -EINPROGRESS;
1128 }
1129
1130 static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
1131 {
1132         if (req->cryptlen % AES_BLOCK_SIZE != 0)
1133                 return -EINVAL;
1134
1135         return qat_alg_skcipher_encrypt(req);
1136 }
1137
1138 static int qat_alg_skcipher_xts_encrypt(struct skcipher_request *req)
1139 {
1140         struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1141         struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
1142         struct skcipher_request *nreq = skcipher_request_ctx(req);
1143
1144         if (req->cryptlen < XTS_BLOCK_SIZE)
1145                 return -EINVAL;
1146
1147         if (ctx->fallback) {
1148                 memcpy(nreq, req, sizeof(*req));
1149                 skcipher_request_set_tfm(nreq, ctx->ftfm);
1150                 return crypto_skcipher_encrypt(nreq);
1151         }
1152
1153         return qat_alg_skcipher_encrypt(req);
1154 }
1155
1156 static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
1157 {
1158         struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1159         struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
1160         struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1161         struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
1162         struct icp_qat_fw_la_cipher_req_params *cipher_param;
1163         struct icp_qat_fw_la_bulk_req *msg;
1164         int ret, ctr = 0;
1165
1166         if (req->cryptlen == 0)
1167                 return 0;
1168
1169         ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
1170         if (unlikely(ret))
1171                 return ret;
1172
1173         msg = &qat_req->req;
1174         *msg = ctx->dec_fw_req;
1175         qat_req->skcipher_ctx = ctx;
1176         qat_req->skcipher_req = req;
1177         qat_req->cb = qat_skcipher_alg_callback;
1178         qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
1179         qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1180         qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1181         qat_req->encryption = false;
1182         cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1183         cipher_param->cipher_length = req->cryptlen;
1184         cipher_param->cipher_offset = 0;
1185         memcpy(cipher_param->u.cipher_IV_array, req->iv, AES_BLOCK_SIZE);
1186
1187         qat_alg_update_iv(qat_req);
1188
1189         do {
1190                 ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
1191         } while (ret == -EAGAIN && ctr++ < 10);
1192
1193         if (ret == -EAGAIN) {
1194                 qat_alg_free_bufl(ctx->inst, qat_req);
1195                 return -EBUSY;
1196         }
1197         return -EINPROGRESS;
1198 }
1199
1200 static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
1201 {
1202         if (req->cryptlen % AES_BLOCK_SIZE != 0)
1203                 return -EINVAL;
1204
1205         return qat_alg_skcipher_decrypt(req);
1206 }
1207
1208 static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req)
1209 {
1210         struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1211         struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
1212         struct skcipher_request *nreq = skcipher_request_ctx(req);
1213
1214         if (req->cryptlen < XTS_BLOCK_SIZE)
1215                 return -EINVAL;
1216
1217         if (ctx->fallback) {
1218                 memcpy(nreq, req, sizeof(*req));
1219                 skcipher_request_set_tfm(nreq, ctx->ftfm);
1220                 return crypto_skcipher_decrypt(nreq);
1221         }
1222
1223         return qat_alg_skcipher_decrypt(req);
1224 }
1225
1226 static int qat_alg_aead_init(struct crypto_aead *tfm,
1227                              enum icp_qat_hw_auth_algo hash,
1228                              const char *hash_name)
1229 {
1230         struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
1231
1232         ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
1233         if (IS_ERR(ctx->hash_tfm))
1234                 return PTR_ERR(ctx->hash_tfm);
1235         ctx->qat_hash_alg = hash;
1236         crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
1237         return 0;
1238 }
1239
1240 static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
1241 {
1242         return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
1243 }
1244
1245 static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
1246 {
1247         return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
1248 }
1249
1250 static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
1251 {
1252         return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
1253 }
1254
1255 static void qat_alg_aead_exit(struct crypto_aead *tfm)
1256 {
1257         struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
1258         struct qat_crypto_instance *inst = ctx->inst;
1259         struct device *dev;
1260
1261         crypto_free_shash(ctx->hash_tfm);
1262
1263         if (!inst)
1264                 return;
1265
1266         dev = &GET_DEV(inst->accel_dev);
1267         if (ctx->enc_cd) {
1268                 memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
1269                 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1270                                   ctx->enc_cd, ctx->enc_cd_paddr);
1271         }
1272         if (ctx->dec_cd) {
1273                 memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
1274                 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1275                                   ctx->dec_cd, ctx->dec_cd_paddr);
1276         }
1277         qat_crypto_put_instance(inst);
1278 }
1279
1280 static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
1281 {
1282         crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
1283         return 0;
1284 }
1285
1286 static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm)
1287 {
1288         struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1289         int reqsize;
1290
1291         ctx->ftfm = crypto_alloc_skcipher("xts(aes)", 0,
1292                                           CRYPTO_ALG_NEED_FALLBACK);
1293         if (IS_ERR(ctx->ftfm))
1294                 return PTR_ERR(ctx->ftfm);
1295
1296         reqsize = max(sizeof(struct qat_crypto_request),
1297                       sizeof(struct skcipher_request) +
1298                       crypto_skcipher_reqsize(ctx->ftfm));
1299         crypto_skcipher_set_reqsize(tfm, reqsize);
1300
1301         return 0;
1302 }
1303
1304 static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
1305 {
1306         struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1307         struct qat_crypto_instance *inst = ctx->inst;
1308         struct device *dev;
1309
1310         if (!inst)
1311                 return;
1312
1313         dev = &GET_DEV(inst->accel_dev);
1314         if (ctx->enc_cd) {
1315                 memset(ctx->enc_cd, 0,
1316                        sizeof(struct icp_qat_hw_cipher_algo_blk));
1317                 dma_free_coherent(dev,
1318                                   sizeof(struct icp_qat_hw_cipher_algo_blk),
1319                                   ctx->enc_cd, ctx->enc_cd_paddr);
1320         }
1321         if (ctx->dec_cd) {
1322                 memset(ctx->dec_cd, 0,
1323                        sizeof(struct icp_qat_hw_cipher_algo_blk));
1324                 dma_free_coherent(dev,
1325                                   sizeof(struct icp_qat_hw_cipher_algo_blk),
1326                                   ctx->dec_cd, ctx->dec_cd_paddr);
1327         }
1328         qat_crypto_put_instance(inst);
1329 }
1330
1331 static void qat_alg_skcipher_exit_xts_tfm(struct crypto_skcipher *tfm)
1332 {
1333         struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1334
1335         if (ctx->ftfm)
1336                 crypto_free_skcipher(ctx->ftfm);
1337
1338         qat_alg_skcipher_exit_tfm(tfm);
1339 }
1340
1341 static struct aead_alg qat_aeads[] = { {
1342         .base = {
1343                 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1344                 .cra_driver_name = "qat_aes_cbc_hmac_sha1",
1345                 .cra_priority = 4001,
1346                 .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1347                 .cra_blocksize = AES_BLOCK_SIZE,
1348                 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1349                 .cra_module = THIS_MODULE,
1350         },
1351         .init = qat_alg_aead_sha1_init,
1352         .exit = qat_alg_aead_exit,
1353         .setkey = qat_alg_aead_setkey,
1354         .decrypt = qat_alg_aead_dec,
1355         .encrypt = qat_alg_aead_enc,
1356         .ivsize = AES_BLOCK_SIZE,
1357         .maxauthsize = SHA1_DIGEST_SIZE,
1358 }, {
1359         .base = {
1360                 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1361                 .cra_driver_name = "qat_aes_cbc_hmac_sha256",
1362                 .cra_priority = 4001,
1363                 .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1364                 .cra_blocksize = AES_BLOCK_SIZE,
1365                 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1366                 .cra_module = THIS_MODULE,
1367         },
1368         .init = qat_alg_aead_sha256_init,
1369         .exit = qat_alg_aead_exit,
1370         .setkey = qat_alg_aead_setkey,
1371         .decrypt = qat_alg_aead_dec,
1372         .encrypt = qat_alg_aead_enc,
1373         .ivsize = AES_BLOCK_SIZE,
1374         .maxauthsize = SHA256_DIGEST_SIZE,
1375 }, {
1376         .base = {
1377                 .cra_name = "authenc(hmac(sha512),cbc(aes))",
1378                 .cra_driver_name = "qat_aes_cbc_hmac_sha512",
1379                 .cra_priority = 4001,
1380                 .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1381                 .cra_blocksize = AES_BLOCK_SIZE,
1382                 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1383                 .cra_module = THIS_MODULE,
1384         },
1385         .init = qat_alg_aead_sha512_init,
1386         .exit = qat_alg_aead_exit,
1387         .setkey = qat_alg_aead_setkey,
1388         .decrypt = qat_alg_aead_dec,
1389         .encrypt = qat_alg_aead_enc,
1390         .ivsize = AES_BLOCK_SIZE,
1391         .maxauthsize = SHA512_DIGEST_SIZE,
1392 } };
1393
1394 static struct skcipher_alg qat_skciphers[] = { {
1395         .base.cra_name = "cbc(aes)",
1396         .base.cra_driver_name = "qat_aes_cbc",
1397         .base.cra_priority = 4001,
1398         .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1399         .base.cra_blocksize = AES_BLOCK_SIZE,
1400         .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1401         .base.cra_alignmask = 0,
1402         .base.cra_module = THIS_MODULE,
1403
1404         .init = qat_alg_skcipher_init_tfm,
1405         .exit = qat_alg_skcipher_exit_tfm,
1406         .setkey = qat_alg_skcipher_cbc_setkey,
1407         .decrypt = qat_alg_skcipher_blk_decrypt,
1408         .encrypt = qat_alg_skcipher_blk_encrypt,
1409         .min_keysize = AES_MIN_KEY_SIZE,
1410         .max_keysize = AES_MAX_KEY_SIZE,
1411         .ivsize = AES_BLOCK_SIZE,
1412 }, {
1413         .base.cra_name = "ctr(aes)",
1414         .base.cra_driver_name = "qat_aes_ctr",
1415         .base.cra_priority = 4001,
1416         .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1417         .base.cra_blocksize = 1,
1418         .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1419         .base.cra_alignmask = 0,
1420         .base.cra_module = THIS_MODULE,
1421
1422         .init = qat_alg_skcipher_init_tfm,
1423         .exit = qat_alg_skcipher_exit_tfm,
1424         .setkey = qat_alg_skcipher_ctr_setkey,
1425         .decrypt = qat_alg_skcipher_decrypt,
1426         .encrypt = qat_alg_skcipher_encrypt,
1427         .min_keysize = AES_MIN_KEY_SIZE,
1428         .max_keysize = AES_MAX_KEY_SIZE,
1429         .ivsize = AES_BLOCK_SIZE,
1430 }, {
1431         .base.cra_name = "xts(aes)",
1432         .base.cra_driver_name = "qat_aes_xts",
1433         .base.cra_priority = 4001,
1434         .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
1435                           CRYPTO_ALG_ALLOCATES_MEMORY,
1436         .base.cra_blocksize = AES_BLOCK_SIZE,
1437         .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1438         .base.cra_alignmask = 0,
1439         .base.cra_module = THIS_MODULE,
1440
1441         .init = qat_alg_skcipher_init_xts_tfm,
1442         .exit = qat_alg_skcipher_exit_xts_tfm,
1443         .setkey = qat_alg_skcipher_xts_setkey,
1444         .decrypt = qat_alg_skcipher_xts_decrypt,
1445         .encrypt = qat_alg_skcipher_xts_encrypt,
1446         .min_keysize = 2 * AES_MIN_KEY_SIZE,
1447         .max_keysize = 2 * AES_MAX_KEY_SIZE,
1448         .ivsize = AES_BLOCK_SIZE,
1449 } };
1450
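/*
 * Algorithms are registered with the crypto API when the first device comes
 * up and unregistered when the last one goes away; active_devs, protected by
 * algs_lock, tracks the number of live devices.
 */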
1451 int qat_algs_register(void)
1452 {
1453         int ret = 0;
1454
1455         mutex_lock(&algs_lock);
1456         if (++active_devs != 1)
1457                 goto unlock;
1458
1459         ret = crypto_register_skciphers(qat_skciphers,
1460                                         ARRAY_SIZE(qat_skciphers));
1461         if (ret)
1462                 goto unlock;
1463
1464         ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
1465         if (ret)
1466                 goto unreg_algs;
1467
1468 unlock:
1469         mutex_unlock(&algs_lock);
1470         return ret;
1471
1472 unreg_algs:
1473         crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
1474         goto unlock;
1475 }
1476
1477 void qat_algs_unregister(void)
1478 {
1479         mutex_lock(&algs_lock);
1480         if (--active_devs != 0)
1481                 goto unlock;
1482
1483         crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
1484         crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
1485
1486 unlock:
1487         mutex_unlock(&algs_lock);
1488 }