// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aes.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/hash.h>
#include <crypto/hmac.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"
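
/*
 * Build the cipher config word for the content descriptor. The encrypt
 * direction uses the key as supplied (NO_CONVERT); the decrypt direction
 * requests KEY_CONVERT, which presumably has the hardware derive the AES
 * decryption key schedule from the same key material.
 */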
#define QAT_AES_HW_CONFIG_ENC(alg, mode) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
				       ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_DEC(alg, mode) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
				       ICP_QAT_HW_CIPHER_DECRYPT)

static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;
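
/*
 * Flat buffer list as parsed by the firmware: a small header followed by
 * an array of buffer descriptors. The structures are __packed and 64-byte
 * aligned because they are DMA mapped and consumed by the device, not by
 * the CPU.
 */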
struct qat_alg_buf {
	u32 len;
	u32 resrvd;
	u64 addr;
} __packed;

struct qat_alg_buf_list {
	u64 resrvd;
	u32 num_bufs;
	u32 num_mapped_bufs;
	struct qat_alg_buf bufers[];
} __packed __aligned(64);

/* Common content descriptor */
struct qat_alg_cd {
	union {
		struct qat_enc { /* Encrypt content desc */
			struct icp_qat_hw_cipher_algo_blk cipher;
			struct icp_qat_hw_auth_algo_blk hash;
		} qat_enc_cd;
		struct qat_dec { /* Decrypt content desc */
			struct icp_qat_hw_auth_algo_blk hash;
			struct icp_qat_hw_cipher_algo_blk cipher;
		} qat_dec_cd;
	};
} __aligned(64);

struct qat_alg_aead_ctx {
	struct qat_alg_cd *enc_cd;
	struct qat_alg_cd *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct crypto_shash *hash_tfm;
	enum icp_qat_hw_auth_algo qat_hash_alg;
	struct qat_crypto_instance *inst;
	union {
		struct sha1_state sha1;
		struct sha256_state sha256;
		struct sha512_state sha512;
	};
	char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
	char opad[SHA512_BLOCK_SIZE];
};

struct qat_alg_skcipher_ctx {
	struct icp_qat_hw_cipher_algo_blk *enc_cd;
	struct icp_qat_hw_cipher_algo_blk *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct qat_crypto_instance *inst;
	struct crypto_skcipher *ftfm;
	bool fallback;
	int mode;
};

static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	default:
		return -EFAULT;
	}
}
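
/*
 * Precompute the inner and outer HMAC partial states for the given auth
 * key: hash one block of key XOR ipad and one block of key XOR opad, then
 * export the midstates into the content descriptor, byte-swapped into the
 * big-endian layout the hardware appears to expect. This way the raw auth
 * key itself never has to be handed to the device.
 */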
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
				  struct qat_alg_aead_ctx *ctx,
				  const u8 *auth_key,
				  unsigned int auth_keylen)
{
	SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
	__be32 *hash_state_out;
	__be64 *hash512_state_out;
	int i, offset;

	memset(ctx->ipad, 0, block_size);
	memset(ctx->opad, 0, block_size);
	shash->tfm = ctx->hash_tfm;

	if (auth_keylen > block_size) {
		int ret = crypto_shash_digest(shash, auth_key,
					      auth_keylen, ctx->ipad);
		if (ret)
			return ret;

		memcpy(ctx->opad, ctx->ipad, digest_size);
	} else {
		memcpy(ctx->ipad, auth_key, auth_keylen);
		memcpy(ctx->opad, auth_key, auth_keylen);
	}

	for (i = 0; i < block_size; i++) {
		char *ipad_ptr = ctx->ipad + i;
		char *opad_ptr = ctx->opad + i;
		*ipad_ptr ^= HMAC_IPAD_VALUE;
		*opad_ptr ^= HMAC_OPAD_VALUE;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ctx->ipad, block_size))
		return -EFAULT;

	hash_state_out = (__be32 *)hash->sha.state1;
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &ctx->sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &ctx->sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &ctx->sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
		break;
	default:
		return -EFAULT;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ctx->opad, block_size))
		return -EFAULT;

	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
	if (offset < 0)
		return -EFAULT;

	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &ctx->sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &ctx->sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &ctx->sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
		break;
	default:
		return -EFAULT;
	}
	memzero_explicit(ctx->ipad, block_size);
	memzero_explicit(ctx->opad, block_size);
	return 0;
}

static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
{
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					    QAT_COMN_PTR_TYPE_SGL);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
}
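
/*
 * Encrypt side of the authenc() session: the content descriptor holds the
 * cipher config followed by the hash setup, and the slices are chained
 * CIPHER -> AUTH, so the digest is computed over the ciphertext
 * (encrypt-then-MAC) and appended to the output.
 */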
static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
					 int alg,
					 struct crypto_authenc_keys *keys,
					 int mode)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
	struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
	struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
	struct icp_qat_hw_auth_algo_blk *hash =
		(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
		sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg, digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
	return 0;
}
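
/*
 * Decrypt side of the authenc() session: the descriptor is laid out
 * hash-first and the slices are chained AUTH -> CIPHER, with CMP_AUTH_RES
 * set so the hardware itself compares the digest over the ciphertext; a
 * mismatch is reported in the response status.
 */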
static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
					 int alg,
					 struct crypto_authenc_keys *keys,
					 int mode)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
	struct icp_qat_hw_cipher_algo_blk *cipher =
		(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
		sizeof(struct icp_qat_hw_auth_setup) +
		roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg,
					     digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset =
		(sizeof(struct icp_qat_hw_auth_setup) +
		 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = 0;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}

	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	auth_param->auth_res_sz = digestsize;
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	return 0;
}
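
/*
 * Plain skcipher sessions: a single cipher slice chained straight to the
 * DRAM writeback, with the key embedded in the content descriptor. The
 * enc/dec variants below differ only in the cipher_config word.
 */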
static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
				      struct icp_qat_fw_la_bulk_req *req,
				      struct icp_qat_hw_cipher_algo_blk *cd,
				      const u8 *key, unsigned int keylen)
{
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;

	memcpy(cd->aes.key, key, keylen);
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
	cd_pars->u.s.content_desc_params_sz =
				sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
	/* Cipher CD config setup */
	cd_ctrl->cipher_key_sz = keylen >> 3;
	cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}

static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
				      int alg, const u8 *key,
				      unsigned int keylen, int mode)
{
	struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_skcipher_init_com(ctx, req, enc_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
}

static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
				      int alg, const u8 *key,
				      unsigned int keylen, int mode)
{
	struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;

	if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
		dec_cd->aes.cipher_config.val =
					QAT_AES_HW_CONFIG_DEC(alg, mode);
	else
		dec_cd->aes.cipher_config.val =
					QAT_AES_HW_CONFIG_ENC(alg, mode);
}
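
/*
 * Map a key length onto the hardware algorithm id. XTS keys carry two AES
 * keys back to back, hence the doubled sizes, and AES-192-XTS is not
 * offered by the hardware at all (see the fallback handling in
 * qat_alg_skcipher_xts_setkey()).
 */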
static int qat_alg_validate_key(int key_len, int *alg, int mode)
{
	if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
		switch (key_len) {
		case AES_KEYSIZE_128:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
			break;
		case AES_KEYSIZE_192:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
			break;
		case AES_KEYSIZE_256:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
			break;
		default:
			return -EINVAL;
		}
	} else {
		switch (key_len) {
		case AES_KEYSIZE_128 << 1:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
			break;
		case AES_KEYSIZE_256 << 1:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
				      unsigned int keylen, int mode)
{
	struct crypto_authenc_keys keys;
	int alg;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
		goto bad_key;

	if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
		goto error;

	if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
		goto error;

	memzero_explicit(&keys, sizeof(keys));
	return 0;
bad_key:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
error:
	memzero_explicit(&keys, sizeof(keys));
	return -EFAULT;
}

static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
					  const u8 *key,
					  unsigned int keylen,
					  int mode)
{
	int alg;

	if (qat_alg_validate_key(keylen, &alg, mode))
		return -EINVAL;

	qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
	qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
	return 0;
}

static int qat_alg_aead_rekey(struct crypto_aead *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
	memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));

	return qat_alg_aead_init_sessions(tfm, key, keylen,
					  ICP_QAT_HW_CIPHER_CBC_MODE);
}

static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key,
			       unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct qat_crypto_instance *inst = NULL;
	int node = get_current_node();
	struct device *dev;
	int ret;

	inst = qat_crypto_get_instance_node(node);
	if (!inst)
		return -EINVAL;
	dev = &GET_DEV(inst->accel_dev);
	ctx->inst = inst;
	ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
					 &ctx->enc_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->enc_cd) {
		ret = -ENOMEM;
		goto out_free_inst;
	}
	ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
					 &ctx->dec_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->dec_cd) {
		ret = -ENOMEM;
		goto out_free_enc;
	}

	ret = qat_alg_aead_init_sessions(tfm, key, keylen,
					 ICP_QAT_HW_CIPHER_CBC_MODE);
	if (ret)
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
out_free_inst:
	ctx->inst = NULL;
	qat_crypto_put_instance(inst);
	return ret;
}

static int qat_alg_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			       unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	if (ctx->enc_cd)
		return qat_alg_aead_rekey(tfm, key, keylen);
	else
		return qat_alg_aead_newkey(tfm, key, keylen);
}

static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
			      struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_alg_buf_list *bl = qat_req->buf.bl;
	struct qat_alg_buf_list *blout = qat_req->buf.blout;
	dma_addr_t blp = qat_req->buf.blp;
	dma_addr_t blpout = qat_req->buf.bloutp;
	size_t sz = qat_req->buf.sz;
	size_t sz_out = qat_req->buf.sz_out;
	int i;

	for (i = 0; i < bl->num_bufs; i++)
		dma_unmap_single(dev, bl->bufers[i].addr,
				 bl->bufers[i].len, DMA_BIDIRECTIONAL);

	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bl);
	if (blp != blpout) {
		/* If out of place operation dma unmap only data */
		int bufless = blout->num_bufs - blout->num_mapped_bufs;

		for (i = bufless; i < blout->num_bufs; i++) {
			dma_unmap_single(dev, blout->bufers[i].addr,
					 blout->bufers[i].len,
					 DMA_BIDIRECTIONAL);
		}
		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
		kfree(blout);
	}
}
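
/*
 * Translate the source and destination scatterlists into the flat buffer
 * lists the firmware parses. Each list is allocated with room for one
 * extra entry (n + 1) and DMA mapped as a whole; for in-place requests the
 * destination simply aliases the source list.
 */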
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
			       struct scatterlist *sgl,
			       struct scatterlist *sglout,
			       struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	int i, sg_nctr = 0;
	int n = sg_nents(sgl);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp;
	dma_addr_t bloutp = 0;
	struct scatterlist *sg;
	size_t sz_out, sz = struct_size(bufl, bufers, n + 1);

	if (unlikely(!n))
		return -EINVAL;

	bufl = kzalloc_node(sz, GFP_ATOMIC,
			    dev_to_node(&GET_DEV(inst->accel_dev)));
	if (unlikely(!bufl))
		return -ENOMEM;

	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err_in;

	for_each_sg(sgl, sg, n, i) {
		int y = sg_nctr;

		if (!sg->length)
			continue;

		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
						      sg->length,
						      DMA_BIDIRECTIONAL);
		bufl->bufers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
			goto err_in;
		sg_nctr++;
	}
	bufl->num_bufs = sg_nctr;
	qat_req->buf.bl = bufl;
	qat_req->buf.blp = blp;
	qat_req->buf.sz = sz;
	/* Handle out of place operation */
	if (sgl != sglout) {
		struct qat_alg_buf *bufers;

		n = sg_nents(sglout);
		sz_out = struct_size(buflout, bufers, n + 1);
		sg_nctr = 0;
		buflout = kzalloc_node(sz_out, GFP_ATOMIC,
				       dev_to_node(&GET_DEV(inst->accel_dev)));
		if (unlikely(!buflout))
			goto err_in;
		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, bloutp)))
			goto err_out;
		bufers = buflout->bufers;
		for_each_sg(sglout, sg, n, i) {
			int y = sg_nctr;

			if (!sg->length)
				continue;

			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
							sg->length,
							DMA_BIDIRECTIONAL);
			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
				goto err_out;
			bufers[y].len = sg->length;
			sg_nctr++;
		}
		buflout->num_bufs = sg_nctr;
		buflout->num_mapped_bufs = sg_nctr;
		qat_req->buf.blout = buflout;
		qat_req->buf.bloutp = bloutp;
		qat_req->buf.sz_out = sz_out;
	} else {
		/* Otherwise set the src and dst to the same address */
		qat_req->buf.bloutp = qat_req->buf.blp;
		qat_req->buf.sz_out = 0;
	}
	return 0;

err_out:
	n = sg_nents(sglout);
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, buflout->bufers[i].addr))
			dma_unmap_single(dev, buflout->bufers[i].addr,
					 buflout->bufers[i].len,
					 DMA_BIDIRECTIONAL);
	if (!dma_mapping_error(dev, bloutp))
		dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
	kfree(buflout);

err_in:
	n = sg_nents(sgl);
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
			dma_unmap_single(dev, bufl->bufers[i].addr,
					 bufl->bufers[i].len,
					 DMA_BIDIRECTIONAL);

	if (!dma_mapping_error(dev, blp))
		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bufl);

	dev_err(dev, "Failed to map buf for dma\n");
	return -ENOMEM;
}

static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
				  struct qat_crypto_request *qat_req)
{
	struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct aead_request *areq = qat_req->aead_req;
	u8 stat_filed = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EBADMSG;
	areq->base.complete(&areq->base, res);
}
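
/*
 * CTR mode IV maintenance: treat the 16-byte IV as a 128-bit big-endian
 * counter split into two 64-bit halves and advance it by the number of
 * blocks just processed, carrying from the low into the high half on
 * overflow.
 */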
static void qat_alg_update_iv_ctr_mode(struct qat_crypto_request *qat_req)
{
	struct skcipher_request *sreq = qat_req->skcipher_req;
	u64 iv_lo_prev;
	u64 iv_lo;
	u64 iv_hi;

	memcpy(qat_req->iv, sreq->iv, AES_BLOCK_SIZE);

	iv_lo = be64_to_cpu(qat_req->iv_lo);
	iv_hi = be64_to_cpu(qat_req->iv_hi);

	iv_lo_prev = iv_lo;
	iv_lo += DIV_ROUND_UP(sreq->cryptlen, AES_BLOCK_SIZE);
	if (iv_lo < iv_lo_prev)
		iv_hi++;

	qat_req->iv_lo = cpu_to_be64(iv_lo);
	qat_req->iv_hi = cpu_to_be64(iv_hi);
}
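
/*
 * CBC mode IV maintenance: the IV for a follow-on request is the last
 * ciphertext block, i.e. the tail of dst on encryption and of src on
 * decryption. For decryption it is saved off before the request is
 * submitted, since an in-place operation would overwrite src.
 */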
static void qat_alg_update_iv_cbc_mode(struct qat_crypto_request *qat_req)
{
	struct skcipher_request *sreq = qat_req->skcipher_req;
	int offset = sreq->cryptlen - AES_BLOCK_SIZE;
	struct scatterlist *sgl;

	if (qat_req->encryption)
		sgl = sreq->dst;
	else
		sgl = sreq->src;

	scatterwalk_map_and_copy(qat_req->iv, sgl, offset, AES_BLOCK_SIZE, 0);
}

static void qat_alg_update_iv(struct qat_crypto_request *qat_req)
{
	struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);

	switch (ctx->mode) {
	case ICP_QAT_HW_CIPHER_CTR_MODE:
		qat_alg_update_iv_ctr_mode(qat_req);
		break;
	case ICP_QAT_HW_CIPHER_CBC_MODE:
		qat_alg_update_iv_cbc_mode(qat_req);
		break;
	case ICP_QAT_HW_CIPHER_XTS_MODE:
		break;
	default:
		dev_warn(dev, "Unsupported IV update for cipher mode %d\n",
			 ctx->mode);
	}
}

static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
				      struct qat_crypto_request *qat_req)
{
	struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct skcipher_request *sreq = qat_req->skcipher_req;
	u8 stat_filed = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EINVAL;

	if (qat_req->encryption)
		qat_alg_update_iv(qat_req);

	memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);

	sreq->base.complete(&sreq->base, res);
}

void qat_alg_callback(void *resp)
{
	struct icp_qat_fw_la_resp *qat_resp = resp;
	struct qat_crypto_request *qat_req =
				(void *)(__force long)qat_resp->opaque_data;

	qat_req->cb(qat_resp, qat_req);
}
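
/*
 * Request submission: the session's request template is copied into the
 * per-request structure, the mapped buffer lists are attached, and the
 * message is pushed to the instance's sym_tx ring, retrying a few times on
 * -EAGAIN before giving up with -EBUSY.
 */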
static int qat_alg_aead_dec(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int digst_size = crypto_aead_authsize(aead_tfm);
	int ret, ctr = 0;
	u32 cipher_len;

	cipher_len = areq->cryptlen - digst_size;
	if (cipher_len % AES_BLOCK_SIZE != 0)
		return -EINVAL;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = cipher_len;
	cipher_param->cipher_offset = areq->assoclen;
	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
	auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_aead_enc(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	u8 *iv = areq->iv;
	int ret, ctr = 0;

	if (areq->cryptlen % AES_BLOCK_SIZE != 0)
		return -EINVAL;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));

	memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
	cipher_param->cipher_length = areq->cryptlen;
	cipher_param->cipher_offset = areq->assoclen;

	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + areq->cryptlen;

	do {
		ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
				  const u8 *key, unsigned int keylen,
				  int mode)
{
	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
	memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));

	return qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
}

static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx,
				   const u8 *key, unsigned int keylen,
				   int mode)
{
	struct qat_crypto_instance *inst = NULL;
	struct device *dev;
	int node = get_current_node();
	int ret;

	inst = qat_crypto_get_instance_node(node);
	if (!inst)
		return -EINVAL;
	dev = &GET_DEV(inst->accel_dev);
	ctx->inst = inst;
	ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
					 &ctx->enc_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->enc_cd) {
		ret = -ENOMEM;
		goto out_free_instance;
	}
	ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
					 &ctx->dec_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->dec_cd) {
		ret = -ENOMEM;
		goto out_free_enc;
	}

	ret = qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
	if (ret)
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	dma_free_coherent(dev, sizeof(*ctx->dec_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	dma_free_coherent(dev, sizeof(*ctx->enc_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
out_free_instance:
	ctx->inst = NULL;
	qat_crypto_put_instance(inst);
	return ret;
}

static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm,
				   const u8 *key, unsigned int keylen,
				   int mode)
{
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->mode = mode;

	if (ctx->enc_cd)
		return qat_alg_skcipher_rekey(ctx, key, keylen, mode);
	else
		return qat_alg_skcipher_newkey(ctx, key, keylen, mode);
}

static int qat_alg_skcipher_cbc_setkey(struct crypto_skcipher *tfm,
					const u8 *key, unsigned int keylen)
{
	return qat_alg_skcipher_setkey(tfm, key, keylen,
				       ICP_QAT_HW_CIPHER_CBC_MODE);
}

static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
					const u8 *key, unsigned int keylen)
{
	return qat_alg_skcipher_setkey(tfm, key, keylen,
				       ICP_QAT_HW_CIPHER_CTR_MODE);
}
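
/*
 * XTS keys are validated (and weak keys rejected) by xts_verify_key().
 * AES-192-XTS is not supported by the accelerator, so such keys are routed
 * to the software xts(aes) fallback tfm instead.
 */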
static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
					const u8 *key, unsigned int keylen)
{
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = xts_verify_key(tfm, key, keylen);
	if (ret)
		return ret;

	if (keylen >> 1 == AES_KEYSIZE_192) {
		ret = crypto_skcipher_setkey(ctx->ftfm, key, keylen);
		if (ret)
			return ret;

		ctx->fallback = true;

		return 0;
	}

	ctx->fallback = false;

	return qat_alg_skcipher_setkey(tfm, key, keylen,
				       ICP_QAT_HW_CIPHER_XTS_MODE);
}

static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
	struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int ret, ctr = 0;

	if (req->cryptlen == 0)
		return 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->skcipher_ctx = ctx;
	qat_req->skcipher_req = req;
	qat_req->cb = qat_skcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	qat_req->encryption = true;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->cryptlen;
	cipher_param->cipher_offset = 0;
	memcpy(cipher_param->u.cipher_IV_array, req->iv, AES_BLOCK_SIZE);

	do {
		ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
{
	if (req->cryptlen % AES_BLOCK_SIZE != 0)
		return -EINVAL;

	return qat_alg_skcipher_encrypt(req);
}

static int qat_alg_skcipher_xts_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
	struct skcipher_request *nreq = skcipher_request_ctx(req);

	if (req->cryptlen < XTS_BLOCK_SIZE)
		return -EINVAL;

	if (ctx->fallback) {
		memcpy(nreq, req, sizeof(*req));
		skcipher_request_set_tfm(nreq, ctx->ftfm);
		return crypto_skcipher_encrypt(nreq);
	}

	return qat_alg_skcipher_encrypt(req);
}

static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
	struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int ret, ctr = 0;

	if (req->cryptlen == 0)
		return 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->skcipher_ctx = ctx;
	qat_req->skcipher_req = req;
	qat_req->cb = qat_skcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	qat_req->encryption = false;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->cryptlen;
	cipher_param->cipher_offset = 0;
	memcpy(cipher_param->u.cipher_IV_array, req->iv, AES_BLOCK_SIZE);

	qat_alg_update_iv(qat_req);

	do {
		ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
{
	if (req->cryptlen % AES_BLOCK_SIZE != 0)
		return -EINVAL;

	return qat_alg_skcipher_decrypt(req);
}

static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
	struct skcipher_request *nreq = skcipher_request_ctx(req);

	if (req->cryptlen < XTS_BLOCK_SIZE)
		return -EINVAL;

	if (ctx->fallback) {
		memcpy(nreq, req, sizeof(*req));
		skcipher_request_set_tfm(nreq, ctx->ftfm);
		return crypto_skcipher_decrypt(nreq);
	}

	return qat_alg_skcipher_decrypt(req);
}

static int qat_alg_aead_init(struct crypto_aead *tfm,
			     enum icp_qat_hw_auth_algo hash,
			     const char *hash_name)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(ctx->hash_tfm))
		return PTR_ERR(ctx->hash_tfm);
	ctx->qat_hash_alg = hash;
	crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
	return 0;
}

static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}

static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}

static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}

static void qat_alg_aead_exit(struct crypto_aead *tfm)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	crypto_free_shash(ctx->hash_tfm);

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}

static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
	return 0;
}

static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm)
{
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	int reqsize;

	ctx->ftfm = crypto_alloc_skcipher("xts(aes)", 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->ftfm))
		return PTR_ERR(ctx->ftfm);

	reqsize = max(sizeof(struct qat_crypto_request),
		      sizeof(struct skcipher_request) +
		      crypto_skcipher_reqsize(ctx->ftfm));
	crypto_skcipher_set_reqsize(tfm, reqsize);

	return 0;
}

static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}

static void qat_alg_skcipher_exit_xts_tfm(struct crypto_skcipher *tfm)
{
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (ctx->ftfm)
		crypto_free_skcipher(ctx->ftfm);

	qat_alg_skcipher_exit_tfm(tfm);
}
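
/*
 * Algorithm registrations. The priority of 4001 places these
 * implementations ahead of the software ciphers, which typically register
 * at priorities of a few hundred, so they are picked by default once a
 * QAT device is up.
 */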
static struct aead_alg qat_aeads[] = { {
	.base = {
		.cra_name = "authenc(hmac(sha1),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha1",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha1_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA1_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha256),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha256",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha256_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA256_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha512),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha512",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha512_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA512_DIGEST_SIZE,
} };

static struct skcipher_alg qat_skciphers[] = { {
	.base.cra_name = "cbc(aes)",
	.base.cra_driver_name = "qat_aes_cbc",
	.base.cra_priority = 4001,
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
	.base.cra_alignmask = 0,
	.base.cra_module = THIS_MODULE,

	.init = qat_alg_skcipher_init_tfm,
	.exit = qat_alg_skcipher_exit_tfm,
	.setkey = qat_alg_skcipher_cbc_setkey,
	.decrypt = qat_alg_skcipher_blk_decrypt,
	.encrypt = qat_alg_skcipher_blk_encrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
}, {
	.base.cra_name = "ctr(aes)",
	.base.cra_driver_name = "qat_aes_ctr",
	.base.cra_priority = 4001,
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = 1,
	.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
	.base.cra_alignmask = 0,
	.base.cra_module = THIS_MODULE,

	.init = qat_alg_skcipher_init_tfm,
	.exit = qat_alg_skcipher_exit_tfm,
	.setkey = qat_alg_skcipher_ctr_setkey,
	.decrypt = qat_alg_skcipher_decrypt,
	.encrypt = qat_alg_skcipher_encrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
}, {
	.base.cra_name = "xts(aes)",
	.base.cra_driver_name = "qat_aes_xts",
	.base.cra_priority = 4001,
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
			  CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
	.base.cra_alignmask = 0,
	.base.cra_module = THIS_MODULE,

	.init = qat_alg_skcipher_init_xts_tfm,
	.exit = qat_alg_skcipher_exit_xts_tfm,
	.setkey = qat_alg_skcipher_xts_setkey,
	.decrypt = qat_alg_skcipher_xts_decrypt,
	.encrypt = qat_alg_skcipher_xts_encrypt,
	.min_keysize = 2 * AES_MIN_KEY_SIZE,
	.max_keysize = 2 * AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
} };

int qat_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs != 1)
		goto unlock;

	ret = crypto_register_skciphers(qat_skciphers,
					ARRAY_SIZE(qat_skciphers));
	if (ret)
		goto unlock;

	ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
	if (ret)
		goto unreg_algs;

unlock:
	mutex_unlock(&algs_lock);
	return ret;

unreg_algs:
	crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
	goto unlock;
}

void qat_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs != 0)
		goto unlock;

	crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
	crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));

unlock:
	mutex_unlock(&algs_lock);
}