// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
 *
 * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * keysizes in CBC and ECB mode, as well as for DES and 3DES in CBC and
 * ECB mode.
 *
 * The datasheet can be found in Documentation/arm/sunxi.rst
 */
#include "sun4i-ss.h"
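
/*
 * sun4i_ss_opti_poll() is the fast path: it feeds the FIFOs with plain
 * 32-bit PIO and is only usable when every source and destination SG
 * entry has a length and offset that are multiples of 4; the caller,
 * sun4i_ss_cipher_poll(), checks this before dispatching here.
 */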
static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_ss_ctx *ss = op->ss;
        unsigned int ivsize = crypto_skcipher_ivsize(tfm);
        struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
        u32 mode = ctx->mode;
        void *backup_iv = NULL;
        /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
        u32 rx_cnt = SS_RX_DEFAULT;
        u32 tx_cnt = 0;
        u32 spaces;
        u32 v;
        int err = 0;
        unsigned int i;
        unsigned int ileft = areq->cryptlen;
        unsigned int oleft = areq->cryptlen;
        unsigned int todo;
        struct sg_mapping_iter mi, mo;
        unsigned int oi, oo;    /* offsets for in and out */
        unsigned long flags;

        if (!areq->cryptlen)
                return 0;

        if (!areq->src || !areq->dst) {
                dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
                return -EINVAL;
        }

        if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
                backup_iv = kzalloc(ivsize, GFP_KERNEL);
                if (!backup_iv)
                        return -ENOMEM;
                scatterwalk_map_and_copy(backup_iv, areq->src, areq->cryptlen - ivsize, ivsize, 0);
        }

        spin_lock_irqsave(&ss->slock, flags);

        for (i = 0; i < op->keylen; i += 4)
                writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

        if (areq->iv) {
                for (i = 0; i < 4 && i < ivsize / 4; i++) {
                        v = *(u32 *)(areq->iv + i * 4);
                        writel(v, ss->base + SS_IV0 + i * 4);
                }
        }
        writel(mode, ss->base + SS_CTL);
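
        /*
         * Map the SGs with SG_MITER_ATOMIC: the ss->slock spinlock is
         * held with IRQs off, so we must not sleep until release_ss.
         */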
        sg_miter_start(&mi, areq->src, sg_nents(areq->src),
                       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
        sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
                       SG_MITER_TO_SG | SG_MITER_ATOMIC);
        sg_miter_next(&mi);
        sg_miter_next(&mo);
        if (!mi.addr || !mo.addr) {
                dev_err_ratelimited(ss->dev, "ERROR: sg_miter returned NULL\n");
                err = -EINVAL;
                goto release_ss;
        }
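
        /*
         * Poll loop: push input words whenever the RX FIFO reports free
         * space, pull output words whenever the TX FIFO has data, until
         * all of cryptlen has been read back. All counters are in 32-bit
         * words in this optimized path.
         */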
        ileft = areq->cryptlen / 4;
        oleft = areq->cryptlen / 4;
        oi = 0;
        oo = 0;
        do {
                todo = min(rx_cnt, ileft);
                todo = min_t(size_t, todo, (mi.length - oi) / 4);
                if (todo) {
                        ileft -= todo;
                        writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
                        oi += todo * 4;
                }
                if (oi == mi.length) {
                        sg_miter_next(&mi);
                        oi = 0;
                }

                spaces = readl(ss->base + SS_FCSR);
                rx_cnt = SS_RXFIFO_SPACES(spaces);
                tx_cnt = SS_TXFIFO_SPACES(spaces);

                todo = min(tx_cnt, oleft);
                todo = min_t(size_t, todo, (mo.length - oo) / 4);
                if (todo) {
                        oleft -= todo;
                        readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
                        oo += todo * 4;
                }
                if (oo == mo.length) {
                        sg_miter_next(&mo);
                        oo = 0;
                }
        } while (oleft);

        if (areq->iv) {
                if (mode & SS_DECRYPTION) {
                        memcpy(areq->iv, backup_iv, ivsize);
                        kfree_sensitive(backup_iv);
                } else {
                        scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize,
                                                 ivsize, 0);
                }
        }

release_ss:
        sg_miter_stop(&mi);
        sg_miter_stop(&mo);
        writel(0, ss->base + SS_CTL);
        spin_unlock_irqrestore(&ss->slock, flags);

        return err;
}
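
/*
 * Delegate the whole request to the software fallback allocated in
 * sun4i_ss_cipher_init(). This is used when cryptlen is not a multiple
 * of the block size, a case the hardware cannot process.
 */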
static int noinline_for_stack sun4i_ss_cipher_poll_fallback(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
        int err;

        skcipher_request_set_tfm(&ctx->fallback_req, op->fallback_tfm);
        skcipher_request_set_callback(&ctx->fallback_req, areq->base.flags,
                                      areq->base.complete, areq->base.data);
        skcipher_request_set_crypt(&ctx->fallback_req, areq->src, areq->dst,
                                   areq->cryptlen, areq->iv);
        if (ctx->mode & SS_DECRYPTION)
                err = crypto_skcipher_decrypt(&ctx->fallback_req);
        else
                err = crypto_skcipher_encrypt(&ctx->fallback_req);

        return err;
}

/* Generic function that supports SGs with sizes not multiple of 4 */
static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_ss_ctx *ss = op->ss;
        int no_chunk = 1;
        struct scatterlist *in_sg = areq->src;
        struct scatterlist *out_sg = areq->dst;
        unsigned int ivsize = crypto_skcipher_ivsize(tfm);
        struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
        struct sun4i_ss_alg_template *algt;
        u32 mode = ctx->mode;
        /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
        u32 rx_cnt = SS_RX_DEFAULT;
        u32 tx_cnt = 0;
        u32 v;
        u32 spaces;
        int err = 0;
        unsigned int i;
        unsigned int ileft = areq->cryptlen;
        unsigned int oleft = areq->cryptlen;
        unsigned int todo;
        void *backup_iv = NULL;
        struct sg_mapping_iter mi, mo;
        unsigned int oi, oo;    /* offsets for in and out */
        unsigned int ob = 0;    /* offset in buf */
        unsigned int obo = 0;   /* offset in bufo */
        unsigned int obl = 0;   /* length of data in bufo */
        unsigned long flags;
        bool need_fallback = false;

        if (!areq->cryptlen)
                return 0;

        if (!areq->src || !areq->dst) {
                dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
                return -EINVAL;
        }

        algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
        if (areq->cryptlen % algt->alg.crypto.base.cra_blocksize)
                need_fallback = true;

        /*
         * If we only have SGs with a size multiple of 4,
         * we can use the SS optimized function.
         */
        while (in_sg && no_chunk == 1) {
                if ((in_sg->length | in_sg->offset) & 3u)
                        no_chunk = 0;
                in_sg = sg_next(in_sg);
        }
        while (out_sg && no_chunk == 1) {
                if ((out_sg->length | out_sg->offset) & 3u)
                        no_chunk = 0;
                out_sg = sg_next(out_sg);
        }

        if (no_chunk == 1 && !need_fallback)
                return sun4i_ss_opti_poll(areq);

        if (need_fallback)
                return sun4i_ss_cipher_poll_fallback(areq);

        if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
                backup_iv = kzalloc(ivsize, GFP_KERNEL);
                if (!backup_iv)
                        return -ENOMEM;
                scatterwalk_map_and_copy(backup_iv, areq->src, areq->cryptlen - ivsize, ivsize, 0);
        }

        spin_lock_irqsave(&ss->slock, flags);

        for (i = 0; i < op->keylen; i += 4)
                writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

        if (areq->iv) {
                for (i = 0; i < 4 && i < ivsize / 4; i++) {
                        v = *(u32 *)(areq->iv + i * 4);
                        writel(v, ss->base + SS_IV0 + i * 4);
                }
        }
        writel(mode, ss->base + SS_CTL);

        sg_miter_start(&mi, areq->src, sg_nents(areq->src),
                       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
        sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
                       SG_MITER_TO_SG | SG_MITER_ATOMIC);
        sg_miter_next(&mi);
        sg_miter_next(&mo);
        if (!mi.addr || !mo.addr) {
                dev_err_ratelimited(ss->dev, "ERROR: sg_miter returned NULL\n");
                err = -EINVAL;
                goto release_ss;
        }
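
        /*
         * Generic loop: SG entries may have any length and alignment, so
         * partial words are staged in the ss->buf (input) and ss->bufo
         * (output) bounce buffers; counters are in bytes here, unlike in
         * sun4i_ss_opti_poll().
         */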
        ileft = areq->cryptlen;
        oleft = areq->cryptlen;
        oi = 0;
        oo = 0;

        while (oleft) {
                if (ileft) {
                        /*
                         * todo is the number of consecutive 4-byte words
                         * that we can read from the current SG.
                         */
                        todo = min(rx_cnt, ileft / 4);
                        todo = min_t(size_t, todo, (mi.length - oi) / 4);
                        if (todo && !ob) {
                                writesl(ss->base + SS_RXFIFO, mi.addr + oi,
                                        todo);
                                ileft -= todo * 4;
                                oi += todo * 4;
                        } else {
                                /*
                                 * Not enough consecutive bytes, so linearize
                                 * them in ss->buf. Here todo is in bytes.
                                 * After the copy, once we have a multiple of
                                 * 4 bytes we must be able to write all of buf
                                 * in one pass, which is why we min() with
                                 * rx_cnt.
                                 */
                                todo = min(rx_cnt * 4 - ob, ileft);
                                todo = min_t(size_t, todo, mi.length - oi);
                                memcpy(ss->buf + ob, mi.addr + oi, todo);
                                ileft -= todo;
                                oi += todo;
                                ob += todo;
                                if (!(ob % 4)) {
                                        writesl(ss->base + SS_RXFIFO, ss->buf,
                                                ob / 4);
                                        ob = 0;
                                }
                        }
                        if (oi == mi.length) {
                                sg_miter_next(&mi);
                                oi = 0;
                        }
                }

                spaces = readl(ss->base + SS_FCSR);
                rx_cnt = SS_RXFIFO_SPACES(spaces);
                tx_cnt = SS_TXFIFO_SPACES(spaces);
                dev_dbg(ss->dev,
                        "%x %u/%zu %u/%u cnt=%u %u/%zu %u/%u cnt=%u %u\n",
                        mode,
                        oi, mi.length, ileft, areq->cryptlen, rx_cnt,
                        oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob);

                if (!tx_cnt)
                        continue;
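
                /*
                 * Output side: write straight into the destination SG
                 * when a whole number of words fits, otherwise drain the
                 * TX FIFO through ss->bufo.
                 */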
                /* todo is in 4-byte words */
                todo = min(tx_cnt, oleft / 4);
                todo = min_t(size_t, todo, (mo.length - oo) / 4);
                if (todo) {
                        readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
                        oleft -= todo * 4;
                        oo += todo * 4;
                        if (oo == mo.length) {
                                sg_miter_next(&mo);
                                oo = 0;
                        }
                } else {
                        /*
                         * Read obl bytes into bufo; read as much as
                         * possible in order to empty the device FIFO.
                         */
                        readsl(ss->base + SS_TXFIFO, ss->bufo, tx_cnt);
                        obl = tx_cnt * 4;
                        obo = 0;
                        do {
                                /*
                                 * How many bytes can we copy?
                                 * No more than the remaining SG size and
                                 * no more than the remaining buffer; no
                                 * need to test against oleft.
                                 */
                                todo = min_t(unsigned int,
                                             mo.length - oo, obl - obo);
                                memcpy(mo.addr + oo, ss->bufo + obo, todo);
                                oleft -= todo;
                                obo += todo;
                                oo += todo;
                                if (oo == mo.length) {
                                        sg_miter_next(&mo);
                                        oo = 0;
                                }
                        } while (obo < obl);
                        /* bufo must be fully used here */
                }
        }
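
        /*
         * Chain the IV for CBC: on encryption the next IV is the last
         * ciphertext block of dst; on decryption it was saved from src
         * into backup_iv before the engine overwrote it.
         */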
        if (areq->iv) {
                if (mode & SS_DECRYPTION) {
                        memcpy(areq->iv, backup_iv, ivsize);
                        kfree_sensitive(backup_iv);
                } else {
                        scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize,
                                                 ivsize, 0);
                }
        }

release_ss:
        sg_miter_stop(&mi);
        sg_miter_stop(&mo);
        writel(0, ss->base + SS_CTL);
        spin_unlock_irqrestore(&ss->slock, flags);

        return err;
}
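
/*
 * The entry points below only record the requested operation (algorithm,
 * chaining mode, direction and keysize) in the request context, then let
 * sun4i_ss_cipher_poll() pick the optimized, generic or fallback path.
 */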
/* CBC AES */
int sun4i_ss_cbc_aes_encrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_aes_decrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

/* ECB AES */
int sun4i_ss_ecb_aes_encrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_aes_decrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

/* CBC DES */
int sun4i_ss_cbc_des_encrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des_decrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

/* ECB DES */
int sun4i_ss_ecb_des_encrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des_decrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

/* CBC 3DES */
int sun4i_ss_cbc_des3_encrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des3_decrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

/* ECB 3DES */
int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}
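
/*
 * Allocate the software fallback at init time so that requests the
 * hardware cannot handle can be delegated later; its request size is
 * reserved on top of ours.
 */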
int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
{
        struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
        struct sun4i_ss_alg_template *algt;
        const char *name = crypto_tfm_alg_name(tfm);
        int err;

        memset(op, 0, sizeof(struct sun4i_tfm_ctx));

        algt = container_of(tfm->__crt_alg, struct sun4i_ss_alg_template,
                            alg.crypto.base);
        op->ss = algt->ss;

        op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(op->fallback_tfm)) {
                dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
                        name, PTR_ERR(op->fallback_tfm));
                return PTR_ERR(op->fallback_tfm);
        }

        crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
                                    sizeof(struct sun4i_cipher_req_ctx) +
                                    crypto_skcipher_reqsize(op->fallback_tfm));

        err = pm_runtime_get_sync(op->ss->dev);
        if (err < 0)
                goto error_pm;

        return 0;
error_pm:
        crypto_free_skcipher(op->fallback_tfm);
        return err;
}

void sun4i_ss_cipher_exit(struct crypto_tfm *tfm)
{
        struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);

        crypto_free_skcipher(op->fallback_tfm);
        pm_runtime_put(op->ss->dev);
}
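
/*
 * The setkey helpers below keep a copy of the key for the hardware
 * (written to the SS_KEY0 register bank at request time) and propagate
 * both the key and the request flags to the fallback tfm.
 */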

/* check and set the AES key, prepare the mode to be used */
int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
                        unsigned int keylen)
{
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_ss_ctx *ss = op->ss;

        switch (keylen) {
        case 128 / 8:
                op->keymode = SS_AES_128BITS;
                break;
        case 192 / 8:
                op->keymode = SS_AES_192BITS;
                break;
        case 256 / 8:
                op->keymode = SS_AES_256BITS;
                break;
        default:
                dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
                return -EINVAL;
        }
        op->keylen = keylen;
        memcpy(op->key, key, keylen);

        crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

        return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}

/* check and set the DES key, prepare the mode to be used */
int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
                        unsigned int keylen)
{
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        int err;

        err = verify_skcipher_des_key(tfm, key);
        if (err)
                return err;

        op->keylen = keylen;
        memcpy(op->key, key, keylen);

        crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

        return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}

/* check and set the 3DES key, prepare the mode to be used */
int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
                         unsigned int keylen)
{
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        int err;

        err = verify_skcipher_des3_key(tfm, key);
        if (err)
                return err;

        op->keylen = keylen;
        memcpy(op->key, key, keylen);

        crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

        return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}