1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
5 * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
7 * This file adds support for the AES cipher with 128-, 192- and 256-bit
8 * keysizes in CBC and ECB mode.
9 * It also adds support for DES and 3DES in CBC and ECB mode.
11 * You can find the datasheet in Documentation/arm/sunxi.rst
/*
 * Optimized polling path: push one skcipher request through the SS engine
 * in PIO mode. The caller (sun4i_ss_cipher_poll) only selects this path when
 * every src/dst SG entry has 4-byte-aligned offset and length, so data can be
 * moved as whole 32-bit words with no bounce buffer.
 */
15 static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
17 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
18 struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
19 struct sun4i_ss_ctx *ss = op->ss;
20 unsigned int ivsize = crypto_skcipher_ivsize(tfm);
21 struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
/* holds a copy of the last ciphertext block during CBC decryption */
23 void *backup_iv = NULL;
24 /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
25 u32 rx_cnt = SS_RX_DEFAULT;
/* amount still to feed to (ileft) / drain from (oleft) the engine */
31 unsigned int ileft = areq->cryptlen;
32 unsigned int oleft = areq->cryptlen;
34 unsigned long pi = 0, po = 0; /* progress for in and out */
36 struct sg_mapping_iter mi, mo;
37 unsigned int oi, oo; /* offset for in and out */
39 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
40 struct sun4i_ss_alg_template *algt;
/* the hardware cannot operate on NULL scatterlists */
45 if (!areq->src || !areq->dst) {
46 dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
/*
 * CBC decryption needs the last ciphertext block as the chaining IV for a
 * subsequent request, so save it before the engine may overwrite dst
 * (src and dst can overlap).
 */
50 if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
51 backup_iv = kzalloc(ivsize, GFP_KERNEL);
54 scatterwalk_map_and_copy(backup_iv, areq->src, areq->cryptlen - ivsize, ivsize, 0);
/* debug-only statistics: bytes processed per algorithm template */
57 if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
58 algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
60 algt->stat_bytes += areq->cryptlen;
/* the SS is a single shared engine: serialize all register access */
63 spin_lock_irqsave(&ss->slock, flags);
/* program the key, one 32-bit word at a time */
65 for (i = 0; i < op->keylen / 4; i++)
66 writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);
/* program the IV (at most four 32-bit words) */
69 for (i = 0; i < 4 && i < ivsize / 4; i++) {
70 v = *(u32 *)(areq->iv + i * 4);
71 writesl(ss->base + SS_IV0 + i * 4, &v, 1);
/* start the engine: mode encodes algorithm, chaining and direction */
74 writel(mode, ss->base + SS_CTL);
/* from here on, ileft/oleft are counted in 32-bit words, not bytes */
77 ileft = areq->cryptlen / 4;
78 oleft = areq->cryptlen / 4;
/*
 * The miter is restarted and skipped to pi/po on each pass because atomic
 * SG mapping cannot be held across the whole operation.
 */
83 sg_miter_start(&mi, areq->src, sg_nents(areq->src),
84 SG_MITER_FROM_SG | SG_MITER_ATOMIC);
86 sg_miter_skip(&mi, pi);
87 miter_err = sg_miter_next(&mi);
88 if (!miter_err || !mi.addr) {
89 dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
/* push as many words as both the RX FIFO and the current SG chunk allow */
93 todo = min(rx_cnt, ileft);
94 todo = min_t(size_t, todo, (mi.length - oi) / 4);
97 writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
/* current SG entry fully consumed: advance overall progress */
100 if (oi == mi.length) {
/* re-read how much FIFO space (RX) and data (TX) is available now */
107 spaces = readl(ss->base + SS_FCSR);
108 rx_cnt = SS_RXFIFO_SPACES(spaces);
109 tx_cnt = SS_TXFIFO_SPACES(spaces);
111 sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
112 SG_MITER_TO_SG | SG_MITER_ATOMIC);
114 sg_miter_skip(&mo, po);
115 miter_err = sg_miter_next(&mo);
116 if (!miter_err || !mo.addr) {
117 dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
/* drain as many words as both the TX FIFO and the current SG chunk allow */
121 todo = min(tx_cnt, oleft);
122 todo = min_t(size_t, todo, (mo.length - oo) / 4);
125 readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
128 if (oo == mo.length) {
/*
 * Update areq->iv for chaining: on decryption restore the saved last
 * ciphertext block; otherwise copy the last produced block from dst.
 */
136 if (mode & SS_DECRYPTION) {
137 memcpy(areq->iv, backup_iv, ivsize);
138 kfree_sensitive(backup_iv);
140 scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize,
/* stop the engine and release it for other users */
146 writel(0, ss->base + SS_CTL);
147 spin_unlock_irqrestore(&ss->slock, flags);
/*
 * Forward the whole request to the software fallback skcipher allocated in
 * sun4i_ss_cipher_init(); used when the hardware cannot process the request
 * (see the checks in sun4i_ss_cipher_poll()).
 */
152 static int noinline_for_stack sun4i_ss_cipher_poll_fallback(struct skcipher_request *areq)
154 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
155 struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
156 struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
158 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
159 struct sun4i_ss_alg_template *algt;
/* debug-only statistics on the algorithm template */
161 if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
162 algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
/* mirror the original request onto the preallocated fallback request */
166 skcipher_request_set_tfm(&ctx->fallback_req, op->fallback_tfm);
167 skcipher_request_set_callback(&ctx->fallback_req, areq->base.flags,
168 areq->base.complete, areq->base.data);
169 skcipher_request_set_crypt(&ctx->fallback_req, areq->src, areq->dst,
170 areq->cryptlen, areq->iv);
/* direction was stashed in ctx->mode by the entry points below */
171 if (ctx->mode & SS_DECRYPTION)
172 err = crypto_skcipher_decrypt(&ctx->fallback_req);
174 err = crypto_skcipher_encrypt(&ctx->fallback_req);
/*
 * Generic request entry: dispatch to the optimized path when possible, to the
 * software fallback when the hardware cannot help, and otherwise drive the
 * engine in PIO mode using ss->buf / ss->bufo as bounce buffers so that SG
 * chunks whose size or offset is not a multiple of 4 can still be handled.
 */
179 /* Generic function that support SG with size not multiple of 4 */
180 static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
182 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
183 struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
184 struct sun4i_ss_ctx *ss = op->ss;
186 struct scatterlist *in_sg = areq->src;
187 struct scatterlist *out_sg = areq->dst;
188 unsigned int ivsize = crypto_skcipher_ivsize(tfm);
189 struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
190 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
191 struct sun4i_ss_alg_template *algt;
192 u32 mode = ctx->mode;
193 /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
194 u32 rx_cnt = SS_RX_DEFAULT;
/* bytes still to feed to (ileft) / drain from (oleft) the engine */
200 unsigned int ileft = areq->cryptlen;
201 unsigned int oleft = areq->cryptlen;
/* holds a copy of the last ciphertext block during CBC decryption */
203 void *backup_iv = NULL;
204 struct sg_mapping_iter mi, mo;
205 unsigned long pi = 0, po = 0; /* progress for in and out */
207 unsigned int oi, oo; /* offset for in and out */
/* bookkeeping for the linearization bounce buffers ss->buf / ss->bufo */
208 unsigned int ob = 0; /* offset in buf */
209 unsigned int obo = 0; /* offset in bufo*/
210 unsigned int obl = 0; /* length of data in bufo */
212 bool need_fallback = false;
217 if (!areq->src || !areq->dst) {
218 dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
/* the engine only handles whole cipher blocks; partial ones need software */
222 algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
223 if (areq->cryptlen % algt->alg.crypto.base.cra_blocksize)
224 need_fallback = true;
227 * if we have only SGs with size multiple of 4,
228 * we can use the SS optimized function
230 while (in_sg && no_chunk == 1) {
231 if ((in_sg->length | in_sg->offset) & 3u)
233 in_sg = sg_next(in_sg);
235 while (out_sg && no_chunk == 1) {
236 if ((out_sg->length | out_sg->offset) & 3u)
238 out_sg = sg_next(out_sg);
241 if (no_chunk == 1 && !need_fallback)
242 return sun4i_ss_opti_poll(areq);
/* presumably reached only when need_fallback is set — guard is elided here */
245 return sun4i_ss_cipher_poll_fallback(areq);
/* CBC decryption: save the last ciphertext block before dst is written */
247 if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
248 backup_iv = kzalloc(ivsize, GFP_KERNEL);
251 scatterwalk_map_and_copy(backup_iv, areq->src, areq->cryptlen - ivsize, ivsize, 0);
254 if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
256 algt->stat_bytes += areq->cryptlen;
/* the SS is a single shared engine: serialize all register access */
259 spin_lock_irqsave(&ss->slock, flags);
/* program key and IV word by word, then start the engine */
261 for (i = 0; i < op->keylen / 4; i++)
262 writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);
265 for (i = 0; i < 4 && i < ivsize / 4; i++) {
266 v = *(u32 *)(areq->iv + i * 4);
267 writesl(ss->base + SS_IV0 + i * 4, &v, 1);
270 writel(mode, ss->base + SS_CTL);
/* unlike the optimized path, ileft/oleft stay counted in bytes here */
272 ileft = areq->cryptlen;
273 oleft = areq->cryptlen;
279 sg_miter_start(&mi, areq->src, sg_nents(areq->src),
280 SG_MITER_FROM_SG | SG_MITER_ATOMIC);
/* restart the atomic iterator at overall progress pi on each pass */
282 sg_miter_skip(&mi, pi);
283 miter_err = sg_miter_next(&mi);
284 if (!miter_err || !mi.addr) {
285 dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
290 * todo is the number of consecutive 4byte word that we
291 * can read from current SG
293 todo = min(rx_cnt, ileft / 4);
294 todo = min_t(size_t, todo, (mi.length - oi) / 4);
/* aligned fast case: write words straight from the SG chunk */
296 writesl(ss->base + SS_RXFIFO, mi.addr + oi,
302 * not enough consecutive bytes, so we need to
303 * linearize in buf. todo is in bytes
304 * After that copy, if we have a multiple of 4
305 * we need to be able to write all buf in one
306 * pass, so it is why we min() with rx_cnt
308 todo = min(rx_cnt * 4 - ob, ileft);
309 todo = min_t(size_t, todo, mi.length - oi);
310 memcpy(ss->buf + ob, mi.addr + oi, todo);
/* flush the bounce buffer to the RX FIFO once it holds whole words */
315 writesl(ss->base + SS_RXFIFO, ss->buf,
320 if (oi == mi.length) {
/* re-read available FIFO space (RX) and pending data (TX) */
327 spaces = readl(ss->base + SS_FCSR);
328 rx_cnt = SS_RXFIFO_SPACES(spaces);
329 tx_cnt = SS_TXFIFO_SPACES(spaces);
333 sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
334 SG_MITER_TO_SG | SG_MITER_ATOMIC);
/* restart the atomic iterator at overall progress po on each pass */
336 sg_miter_skip(&mo, po);
337 miter_err = sg_miter_next(&mo);
338 if (!miter_err || !mo.addr) {
339 dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
343 /* todo in 4bytes word */
344 todo = min(tx_cnt, oleft / 4);
345 todo = min_t(size_t, todo, (mo.length - oo) / 4);
/* aligned fast case: read words straight into the SG chunk */
348 readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
351 if (oo == mo.length) {
357 * read obl bytes in bufo, we read at maximum for
358 * emptying the device
360 readsl(ss->base + SS_TXFIFO, ss->bufo, tx_cnt);
365 * how many bytes we can copy ?
366 * no more than remaining SG size
367 * no more than remaining buffer
368 * no need to test against oleft
371 mo.length - oo, obl - obo);
372 memcpy(mo.addr + oo, ss->bufo + obo, todo);
376 if (oo == mo.length) {
382 /* bufo must be fully used here */
/* update areq->iv for chaining, as in the optimized path */
387 if (mode & SS_DECRYPTION) {
388 memcpy(areq->iv, backup_iv, ivsize);
389 kfree_sensitive(backup_iv);
391 scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize,
/* stop the engine and release it for other users */
397 writel(0, ss->base + SS_CTL);
398 spin_unlock_irqrestore(&ss->slock, flags);
/* CBC(AES) encryption entry point: build the mode word, then run the request */
404 int sun4i_ss_cbc_aes_encrypt(struct skcipher_request *areq)
406 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
407 struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
408 struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
410 rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
412 return sun4i_ss_cipher_poll(areq);
/* CBC(AES) decryption entry point: build the mode word, then run the request */
415 int sun4i_ss_cbc_aes_decrypt(struct skcipher_request *areq)
417 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
418 struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
419 struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
421 rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
423 return sun4i_ss_cipher_poll(areq);
/* ECB(AES) encryption entry point: build the mode word, then run the request */
427 int sun4i_ss_ecb_aes_encrypt(struct skcipher_request *areq)
429 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
430 struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
431 struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
433 rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
435 return sun4i_ss_cipher_poll(areq);
/* ECB(AES) decryption entry point: build the mode word, then run the request */
438 int sun4i_ss_ecb_aes_decrypt(struct skcipher_request *areq)
440 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
441 struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
442 struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
444 rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
446 return sun4i_ss_cipher_poll(areq);
/* CBC(DES) encryption entry point: build the mode word, then run the request */
450 int sun4i_ss_cbc_des_encrypt(struct skcipher_request *areq)
452 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
453 struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
454 struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
456 rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
458 return sun4i_ss_cipher_poll(areq);
/* CBC(DES) decryption entry point: build the mode word, then run the request */
461 int sun4i_ss_cbc_des_decrypt(struct skcipher_request *areq)
463 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
464 struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
465 struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
467 rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
469 return sun4i_ss_cipher_poll(areq);
/* ECB(DES) encryption entry point: build the mode word, then run the request */
473 int sun4i_ss_ecb_des_encrypt(struct skcipher_request *areq)
475 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
476 struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
477 struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
479 rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
481 return sun4i_ss_cipher_poll(areq);
/* ECB(DES) decryption entry point: build the mode word, then run the request */
484 int sun4i_ss_ecb_des_decrypt(struct skcipher_request *areq)
486 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
487 struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
488 struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
490 rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
492 return sun4i_ss_cipher_poll(areq);
/* CBC(3DES) encryption entry point: build the mode word, then run the request */
496 int sun4i_ss_cbc_des3_encrypt(struct skcipher_request *areq)
498 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
499 struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
500 struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
502 rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
504 return sun4i_ss_cipher_poll(areq);
/* CBC(3DES) decryption entry point: build the mode word, then run the request */
507 int sun4i_ss_cbc_des3_decrypt(struct skcipher_request *areq)
509 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
510 struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
511 struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
513 rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
515 return sun4i_ss_cipher_poll(areq);
/* ECB(3DES) encryption entry point: build the mode word, then run the request */
519 int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq)
521 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
522 struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
523 struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
525 rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
527 return sun4i_ss_cipher_poll(areq);
/* ECB(3DES) decryption entry point: build the mode word, then run the request */
530 int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq)
532 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
533 struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
534 struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
536 rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
538 return sun4i_ss_cipher_poll(areq);
/*
 * Per-transform init: allocate a software fallback implementation of the same
 * algorithm, size the request context to append the fallback's own request,
 * and take a PM runtime reference so the device stays powered while in use.
 */
541 int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
543 struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
544 struct sun4i_ss_alg_template *algt;
545 const char *name = crypto_tfm_alg_name(tfm);
548 memset(op, 0, sizeof(struct sun4i_tfm_ctx));
550 algt = container_of(tfm->__crt_alg, struct sun4i_ss_alg_template,
/* ask the crypto API for any other provider of this algorithm */
554 op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
555 if (IS_ERR(op->fallback_tfm)) {
556 dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
557 name, PTR_ERR(op->fallback_tfm));
558 return PTR_ERR(op->fallback_tfm);
/* our request ctx is followed in memory by the fallback's request */
561 crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
562 sizeof(struct sun4i_cipher_req_ctx) +
563 crypto_skcipher_reqsize(op->fallback_tfm));
566 err = pm_runtime_get_sync(op->ss->dev);
/* error path: undo the fallback allocation if powering up failed */
572 crypto_free_skcipher(op->fallback_tfm);
/* Per-transform teardown: free the fallback and drop the PM runtime reference */
576 void sun4i_ss_cipher_exit(struct crypto_tfm *tfm)
578 struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
580 crypto_free_skcipher(op->fallback_tfm);
581 pm_runtime_put(op->ss->dev);
584 /* check and set the AES key, prepare the mode to be used */
585 int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
588 struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
589 struct sun4i_ss_ctx *ss = op->ss;
/* map keylen to the hardware key-size field (switch skeleton elided here) */
593 op->keymode = SS_AES_128BITS;
596 op->keymode = SS_AES_192BITS;
599 op->keymode = SS_AES_256BITS;
/* any other length is rejected */
602 dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
/* keep a copy for programming SS_KEY0.. in the poll functions */
606 memcpy(op->key, key, keylen);
/* propagate request flags and the key to the fallback transform too */
608 crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
609 crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
611 return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
614 /* check and set the DES key, prepare the mode to be used */
615 int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
618 struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
/* reject invalid/weak DES keys via the crypto API helper */
621 err = verify_skcipher_des_key(tfm, key);
/* keep a copy for programming SS_KEY0.. in the poll functions */
626 memcpy(op->key, key, keylen);
/* propagate request flags and the key to the fallback transform too */
628 crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
629 crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
631 return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
634 /* check and set the 3DES key, prepare the mode to be used */
635 int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
638 struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
/* reject invalid 3DES keys (e.g. degenerate ones) via the crypto API helper */
641 err = verify_skcipher_des3_key(tfm, key);
/* keep a copy for programming SS_KEY0.. in the poll functions */
646 memcpy(op->key, key, keylen);
/* propagate request flags and the key to the fallback transform too */
648 crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
649 crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
651 return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);