1 // SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Shared glue code for 128bit block ciphers
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 * CTR part based on code (crypto/ctr.c) by:
 *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 */
13 #include <linux/module.h>
14 #include <crypto/b128ops.h>
15 #include <crypto/internal/skcipher.h>
16 #include <crypto/scatterwalk.h>
17 #include <asm/crypto/glue_helper.h>
19 int glue_ecb_req_128bit(const struct common_glue_ctx *gctx,
20 struct skcipher_request *req)
22 void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
23 const unsigned int bsize = 128 / 8;
24 struct skcipher_walk walk;
25 bool fpu_enabled = false;
29 err = skcipher_walk_virt(&walk, req, false);
31 while ((nbytes = walk.nbytes)) {
32 const u8 *src = walk.src.virt.addr;
33 u8 *dst = walk.dst.virt.addr;
34 unsigned int func_bytes;
37 fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
38 &walk, fpu_enabled, nbytes);
39 for (i = 0; i < gctx->num_funcs; i++) {
40 func_bytes = bsize * gctx->funcs[i].num_blocks;
42 if (nbytes < func_bytes)
45 /* Process multi-block batch */
47 gctx->funcs[i].fn_u.ecb(ctx, dst, src);
51 } while (nbytes >= func_bytes);
56 err = skcipher_walk_done(&walk, nbytes);
59 glue_fpu_end(fpu_enabled);
62 EXPORT_SYMBOL_GPL(glue_ecb_req_128bit);
64 int glue_cbc_encrypt_req_128bit(const common_glue_func_t fn,
65 struct skcipher_request *req)
67 void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
68 const unsigned int bsize = 128 / 8;
69 struct skcipher_walk walk;
73 err = skcipher_walk_virt(&walk, req, false);
75 while ((nbytes = walk.nbytes)) {
76 const u128 *src = (u128 *)walk.src.virt.addr;
77 u128 *dst = (u128 *)walk.dst.virt.addr;
78 u128 *iv = (u128 *)walk.iv;
81 u128_xor(dst, src, iv);
82 fn(ctx, (u8 *)dst, (u8 *)dst);
87 } while (nbytes >= bsize);
89 *(u128 *)walk.iv = *iv;
90 err = skcipher_walk_done(&walk, nbytes);
94 EXPORT_SYMBOL_GPL(glue_cbc_encrypt_req_128bit);
96 int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx,
97 struct skcipher_request *req)
99 void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
100 const unsigned int bsize = 128 / 8;
101 struct skcipher_walk walk;
102 bool fpu_enabled = false;
106 err = skcipher_walk_virt(&walk, req, false);
108 while ((nbytes = walk.nbytes)) {
109 const u128 *src = walk.src.virt.addr;
110 u128 *dst = walk.dst.virt.addr;
111 unsigned int func_bytes, num_blocks;
115 fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
116 &walk, fpu_enabled, nbytes);
117 /* Start of the last block. */
118 src += nbytes / bsize - 1;
119 dst += nbytes / bsize - 1;
123 for (i = 0; i < gctx->num_funcs; i++) {
124 num_blocks = gctx->funcs[i].num_blocks;
125 func_bytes = bsize * num_blocks;
127 if (nbytes < func_bytes)
130 /* Process multi-block batch */
132 src -= num_blocks - 1;
133 dst -= num_blocks - 1;
135 gctx->funcs[i].fn_u.cbc(ctx, (u8 *)dst,
138 nbytes -= func_bytes;
142 u128_xor(dst, dst, --src);
144 } while (nbytes >= func_bytes);
147 u128_xor(dst, dst, (u128 *)walk.iv);
148 *(u128 *)walk.iv = last_iv;
149 err = skcipher_walk_done(&walk, nbytes);
152 glue_fpu_end(fpu_enabled);
155 EXPORT_SYMBOL_GPL(glue_cbc_decrypt_req_128bit);
157 int glue_ctr_req_128bit(const struct common_glue_ctx *gctx,
158 struct skcipher_request *req)
160 void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
161 const unsigned int bsize = 128 / 8;
162 struct skcipher_walk walk;
163 bool fpu_enabled = false;
167 err = skcipher_walk_virt(&walk, req, false);
169 while ((nbytes = walk.nbytes) >= bsize) {
170 const u128 *src = walk.src.virt.addr;
171 u128 *dst = walk.dst.virt.addr;
172 unsigned int func_bytes, num_blocks;
176 fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
177 &walk, fpu_enabled, nbytes);
179 be128_to_le128(&ctrblk, (be128 *)walk.iv);
181 for (i = 0; i < gctx->num_funcs; i++) {
182 num_blocks = gctx->funcs[i].num_blocks;
183 func_bytes = bsize * num_blocks;
185 if (nbytes < func_bytes)
188 /* Process multi-block batch */
190 gctx->funcs[i].fn_u.ctr(ctx, (u8 *)dst,
195 nbytes -= func_bytes;
196 } while (nbytes >= func_bytes);
202 le128_to_be128((be128 *)walk.iv, &ctrblk);
203 err = skcipher_walk_done(&walk, nbytes);
206 glue_fpu_end(fpu_enabled);
212 be128_to_le128(&ctrblk, (be128 *)walk.iv);
213 memcpy(&tmp, walk.src.virt.addr, nbytes);
214 gctx->funcs[gctx->num_funcs - 1].fn_u.ctr(ctx, (u8 *)&tmp,
217 memcpy(walk.dst.virt.addr, &tmp, nbytes);
218 le128_to_be128((be128 *)walk.iv, &ctrblk);
220 err = skcipher_walk_done(&walk, 0);
225 EXPORT_SYMBOL_GPL(glue_ctr_req_128bit);
227 MODULE_LICENSE("GPL");