// SPDX-License-Identifier: GPL-2.0
/*
 * K3 SA2UL crypto accelerator driver
 *
 * Copyright (C) 2018-2020 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors:     Keerthy
 *              Vitaly Andrianov
 *              Tero Kristo
 */
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include <crypto/aes.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>

#include "sa2ul.h"

/* Byte offset for key in encryption security context */
#define SC_ENC_KEY_OFFSET (1 + 27 + 4)
/* Byte offset for Aux-1 in encryption security context */
#define SC_ENC_AUX1_OFFSET (1 + 27 + 4 + 32)

#define SA_CMDL_UPD_ENC         0x0001
#define SA_CMDL_UPD_AUTH        0x0002
#define SA_CMDL_UPD_ENC_IV      0x0004
#define SA_CMDL_UPD_AUTH_IV     0x0008
#define SA_CMDL_UPD_AUX_KEY     0x0010

#define SA_AUTH_SUBKEY_LEN      16
#define SA_CMDL_PAYLOAD_LENGTH_MASK     0xFFFF
#define SA_CMDL_SOP_BYPASS_LEN_MASK     0xFF000000

#define MODE_CONTROL_BYTES      27
#define SA_HASH_PROCESSING      0
#define SA_CRYPTO_PROCESSING    0
#define SA_UPLOAD_HASH_TO_TLR   BIT(6)

#define SA_SW0_FLAGS_MASK       0xF0000
#define SA_SW0_CMDL_INFO_MASK   0x1F00000
#define SA_SW0_CMDL_PRESENT     BIT(4)
#define SA_SW0_ENG_ID_MASK      0x3E000000
#define SA_SW0_DEST_INFO_PRESENT        BIT(30)
#define SA_SW2_EGRESS_LENGTH            0xFF000000
#define SA_BASIC_HASH           0x10

#define SHA256_DIGEST_WORDS    8
/* Make 32-bit word from 4 bytes */
#define SA_MK_U32(b0, b1, b2, b3) (((b0) << 24) | ((b1) << 16) | \
                                   ((b2) << 8) | (b3))
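
/*
 * Illustrative note (not part of the driver): SA_MK_U32 packs four bytes
 * MSB-first into a host-order word, e.g.
 *
 *      SA_MK_U32(0x01, 0x02, 0x03, 0x04) == 0x01020304
 */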

/* size of SCCTL structure in bytes */
#define SA_SCCTL_SZ 16

/* Max Authentication tag size */
#define SA_MAX_AUTH_TAG_SZ 64

#define PRIV_ID 0x1
#define PRIV    0x1

static struct device *sa_k3_dev;

/**
 * struct sa_cmdl_cfg - Command label configuration descriptor
 * @aalg: authentication algorithm ID
 * @enc_eng_id: Encryption Engine ID supported by the SA hardware
 * @auth_eng_id: Authentication Engine ID
 * @iv_size: Initialization Vector size
 * @akey: Authentication key
 * @akey_len: Authentication key length
 * @enc: True, if this is an encode request
 */
struct sa_cmdl_cfg {
        int aalg;
        u8 enc_eng_id;
        u8 auth_eng_id;
        u8 iv_size;
        const u8 *akey;
        u16 akey_len;
        bool enc;
};

/**
 * struct algo_data - Crypto algorithm specific data
 * @enc_eng: Encryption engine info structure
 * @auth_eng: Authentication engine info structure
 * @auth_ctrl: Authentication control word
 * @hash_size: Size of digest
 * @iv_idx: iv index in psdata
 * @iv_out_size: iv out size
 * @ealg_id: Encryption Algorithm ID
 * @aalg_id: Authentication algorithm ID
 * @mci_enc: Mode Control Instruction for Encryption algorithm
 * @mci_dec: Mode Control Instruction for Decryption
 * @inv_key: Whether the encryption algorithm demands key inversion
 * @ctx: Pointer to the algorithm context
 * @keyed_mac: Whether the authentication algorithm has key
 * @prep_iopad: Function pointer to generate intermediate ipad/opad
 */
struct algo_data {
        struct sa_eng_info enc_eng;
        struct sa_eng_info auth_eng;
        u8 auth_ctrl;
        u8 hash_size;
        u8 iv_idx;
        u8 iv_out_size;
        u8 ealg_id;
        u8 aalg_id;
        u8 *mci_enc;
        u8 *mci_dec;
        bool inv_key;
        struct sa_tfm_ctx *ctx;
        bool keyed_mac;
        void (*prep_iopad)(struct algo_data *algo, const u8 *key,
                           u16 key_sz, __be32 *ipad, __be32 *opad);
};

/**
 * struct sa_alg_tmpl: A generic template encompassing crypto/aead algorithms
 * @type: Type of the crypto algorithm.
 * @alg: Union of crypto algorithm definitions.
 * @registered: Flag indicating if the crypto algorithm is already registered
 */
struct sa_alg_tmpl {
        u32 type;               /* CRYPTO_ALG_TYPE from <linux/crypto.h> */
        union {
                struct skcipher_alg skcipher;
                struct ahash_alg ahash;
                struct aead_alg aead;
        } alg;
        bool registered;
};

/**
 * struct sa_mapped_sg: scatterlist information for tx and rx
 * @mapped: Set to true if the @sgt is mapped
 * @dir: mapping direction used for @sgt
 * @split_sg: Set if the sg is split and needs to be freed up
 * @static_sg: Static scatterlist entry for overriding data
 * @sgt: scatterlist table for DMA API use
 */
struct sa_mapped_sg {
        bool mapped;
        enum dma_data_direction dir;
        struct scatterlist static_sg;
        struct scatterlist *split_sg;
        struct sg_table sgt;
};

/**
 * struct sa_rx_data: RX packet miscellaneous data placeholder
 * @req: crypto request data pointer
 * @ddev: pointer to the DMA device
 * @tx_in: dma_async_tx_descriptor pointer for rx channel
 * @mapped_sg: Information on tx (0) and rx (1) scatterlist DMA mapping
 * @enc: Flag indicating either encryption or decryption
 * @enc_iv_size: Initialisation vector size
 * @iv_idx: Initialisation vector index
 */
struct sa_rx_data {
        void *req;
        struct device *ddev;
        struct dma_async_tx_descriptor *tx_in;
        struct sa_mapped_sg mapped_sg[2];
        u8 enc;
        u8 enc_iv_size;
        u8 iv_idx;
};

/**
 * struct sa_req: SA request definition
 * @dev: device for the request
 * @size: total data to be transmitted via DMA
 * @enc_offset: offset of cipher data
 * @enc_size: data to be passed to cipher engine
 * @enc_iv: cipher IV
 * @auth_offset: offset of the authentication data
 * @auth_size: size of the authentication data
 * @auth_iv: authentication IV
 * @type: algorithm type for the request
 * @cmdl: command label pointer
 * @base: pointer to the base request
 * @ctx: pointer to the algorithm context data
 * @enc: true if this is an encode request
 * @src: source data
 * @dst: destination data
 * @callback: DMA callback for the request
 * @mdata_size: metadata size passed to DMA
 */
struct sa_req {
        struct device *dev;
        u16 size;
        u8 enc_offset;
        u16 enc_size;
        u8 *enc_iv;
        u8 auth_offset;
        u16 auth_size;
        u8 *auth_iv;
        u32 type;
        u32 *cmdl;
        struct crypto_async_request *base;
        struct sa_tfm_ctx *ctx;
        bool enc;
        struct scatterlist *src;
        struct scatterlist *dst;
        dma_async_tx_callback callback;
        u16 mdata_size;
};

/*
 * Mode Control Instructions for various Key lengths 128, 192, 256
 * For CBC (Cipher Block Chaining) mode for encryption
 */
static u8 mci_cbc_enc_array[3][MODE_CONTROL_BYTES] = {
        {       0x61, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x61, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x61, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
};

/*
 * Mode Control Instructions for various Key lengths 128, 192, 256
 * For CBC (Cipher Block Chaining) mode for decryption
 */
static u8 mci_cbc_dec_array[3][MODE_CONTROL_BYTES] = {
        {       0x71, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x71, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x71, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
};

/*
 * Mode Control Instructions for various Key lengths 128, 192, 256
 * For CBC (Cipher Block Chaining) mode for encryption (no-IV variant)
 */
static u8 mci_cbc_enc_no_iv_array[3][MODE_CONTROL_BYTES] = {
        {       0x21, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x21, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x21, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
};

/*
 * Mode Control Instructions for various Key lengths 128, 192, 256
 * For CBC (Cipher Block Chaining) mode for decryption (no-IV variant)
 */
static u8 mci_cbc_dec_no_iv_array[3][MODE_CONTROL_BYTES] = {
        {       0x31, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x31, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x31, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
};

/*
 * Mode Control Instructions for various Key lengths 128, 192, 256
 * For ECB (Electronic Code Book) mode for encryption
 */
static u8 mci_ecb_enc_array[3][MODE_CONTROL_BYTES] = {
        {       0x21, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x21, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x21, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
};

/*
 * Mode Control Instructions for various Key lengths 128, 192, 256
 * For ECB (Electronic Code Book) mode for decryption
 */
static u8 mci_ecb_dec_array[3][MODE_CONTROL_BYTES] = {
        {       0x31, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x31, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x31, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
};

/*
 * Mode Control Instructions for the 3DES algorithm,
 * for CBC (Cipher Block Chaining) and ECB modes,
 * encryption and decryption respectively
 */
static u8 mci_cbc_3des_enc_array[MODE_CONTROL_BYTES] = {
        0x60, 0x00, 0x00, 0x18, 0x88, 0x52, 0xaa, 0x4b, 0x7e, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00,
};

static u8 mci_cbc_3des_dec_array[MODE_CONTROL_BYTES] = {
        0x70, 0x00, 0x00, 0x85, 0x0a, 0xca, 0x98, 0xf4, 0x40, 0xc0, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00,
};

static u8 mci_ecb_3des_enc_array[MODE_CONTROL_BYTES] = {
        0x20, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00,
};

static u8 mci_ecb_3des_dec_array[MODE_CONTROL_BYTES] = {
        0x30, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00,
};

/*
 * Perform 16-byte (128-bit) swizzling.
 * The SA2UL expects the security context in little-endian format and the
 * bus width is 128 bits (16 bytes), hence swap each 16-byte group from
 * the higher to the lower address.
 */
static void sa_swiz_128(u8 *in, u16 len)
{
        u8 data[16];
        int i, j;

        for (i = 0; i < len; i += 16) {
                memcpy(data, &in[i], 16);
                for (j = 0; j < 16; j++)
                        in[i + j] = data[15 - j];
        }
}
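
/*
 * Worked example (illustrative): each 16-byte group is reversed in place,
 * so for one group
 *
 *      in:  00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f
 *      out: 0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 *
 * The loop assumes len is a multiple of 16, which holds for the security
 * context buffers passed in.
 */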

/* Prepare the ipad and opad from the key, per the HMAC algorithm step 1 */
static void prepare_kipad(u8 *k_ipad, const u8 *key, u16 key_sz)
{
        int i;

        for (i = 0; i < key_sz; i++)
                k_ipad[i] = key[i] ^ 0x36;

        /* Instead of XOR with 0 */
        for (; i < SHA1_BLOCK_SIZE; i++)
                k_ipad[i] = 0x36;
}

static void prepare_kopad(u8 *k_opad, const u8 *key, u16 key_sz)
{
        int i;

        for (i = 0; i < key_sz; i++)
                k_opad[i] = key[i] ^ 0x5c;

        /* Instead of XOR with 0 */
        for (; i < SHA1_BLOCK_SIZE; i++)
                k_opad[i] = 0x5c;
}
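
/*
 * Background (RFC 2104): HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)),
 * with ipad = 0x36 repeated and opad = 0x5c repeated over one hash block.
 * The helpers above produce (K ^ ipad) and (K ^ opad) with short keys
 * zero-padded; SHA1_BLOCK_SIZE works for both supported hashes since SHA-1
 * and SHA-256 share a 64-byte block. sa_prepare_iopads() below hashes one
 * block of each and exports the intermediate digest states, which is what
 * the hardware consumes instead of the raw key.
 */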

static void sa_export_shash(void *state, struct shash_desc *hash,
                            int digest_size, __be32 *out)
{
        struct sha1_state *sha1;
        struct sha256_state *sha256;
        u32 *result;

        switch (digest_size) {
        case SHA1_DIGEST_SIZE:
                sha1 = state;
                result = sha1->state;
                break;
        case SHA256_DIGEST_SIZE:
                sha256 = state;
                result = sha256->state;
                break;
        default:
                dev_err(sa_k3_dev, "%s: bad digest_size=%d\n", __func__,
                        digest_size);
                return;
        }

        crypto_shash_export(hash, state);

        cpu_to_be32_array(out, result, digest_size / 4);
}

static void sa_prepare_iopads(struct algo_data *data, const u8 *key,
                              u16 key_sz, __be32 *ipad, __be32 *opad)
{
        SHASH_DESC_ON_STACK(shash, data->ctx->shash);
        int block_size = crypto_shash_blocksize(data->ctx->shash);
        int digest_size = crypto_shash_digestsize(data->ctx->shash);
        union {
                struct sha1_state sha1;
                struct sha256_state sha256;
                u8 k_pad[SHA1_BLOCK_SIZE];
        } sha;

        shash->tfm = data->ctx->shash;

        prepare_kipad(sha.k_pad, key, key_sz);

        crypto_shash_init(shash);
        crypto_shash_update(shash, sha.k_pad, block_size);
        sa_export_shash(&sha, shash, digest_size, ipad);

        prepare_kopad(sha.k_pad, key, key_sz);

        crypto_shash_init(shash);
        crypto_shash_update(shash, sha.k_pad, block_size);

        sa_export_shash(&sha, shash, digest_size, opad);

        memzero_explicit(&sha, sizeof(sha));
}

/* Derive the inverse key used in AES-CBC decryption operation */
static inline int sa_aes_inv_key(u8 *inv_key, const u8 *key, u16 key_sz)
{
        struct crypto_aes_ctx ctx;
        int key_pos;

        if (aes_expandkey(&ctx, key, key_sz)) {
                dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
                return -EINVAL;
        }

        /* Workaround to get the right inverse for AES_KEYSIZE_192 size keys */
        if (key_sz == AES_KEYSIZE_192) {
                ctx.key_enc[52] = ctx.key_enc[51] ^ ctx.key_enc[46];
                ctx.key_enc[53] = ctx.key_enc[52] ^ ctx.key_enc[47];
        }

        /* Based on crypto_aes_expand_key() logic */
        switch (key_sz) {
        case AES_KEYSIZE_128:
        case AES_KEYSIZE_192:
                key_pos = key_sz + 24;
                break;

        case AES_KEYSIZE_256:
                key_pos = key_sz + 24 - 4;
                break;

        default:
                dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
                return -EINVAL;
        }

        memcpy(inv_key, &ctx.key_enc[key_pos], key_sz);
        return 0;
}
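
/*
 * Illustrative note: aes_expandkey() emits the key schedule as 32-bit
 * words in key_enc, and key_pos indexes words. For AES-128 (44 schedule
 * words), key_pos = 16 + 24 = 40, so words 40..43 (the final round key)
 * are copied; for AES-192 the 6-word copy would run past the 52-word
 * schedule, which is why words 52 and 53 are synthesized above.
 */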

/* Set Security context for the encryption engine */
static int sa_set_sc_enc(struct algo_data *ad, const u8 *key, u16 key_sz,
                         u8 enc, u8 *sc_buf)
{
        const u8 *mci = NULL;

        /* Set Encryption mode selector to crypto processing */
        sc_buf[0] = SA_CRYPTO_PROCESSING;

        if (enc)
                mci = ad->mci_enc;
        else
                mci = ad->mci_dec;
        /* Set the mode control instructions in security context */
        if (mci)
                memcpy(&sc_buf[1], mci, MODE_CONTROL_BYTES);

        /* For AES-CBC decryption get the inverse key */
        if (ad->inv_key && !enc) {
                if (sa_aes_inv_key(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz))
                        return -EINVAL;
        /* For all other cases: key is used */
        } else {
                memcpy(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz);
        }

        return 0;
}

/* Set Security context for the authentication engine */
static void sa_set_sc_auth(struct algo_data *ad, const u8 *key, u16 key_sz,
                           u8 *sc_buf)
{
        __be32 *ipad = (void *)(sc_buf + 32);
        __be32 *opad = (void *)(sc_buf + 64);

        /* Set Authentication mode selector to hash processing */
        sc_buf[0] = SA_HASH_PROCESSING;
        /* Auth SW ctrl word: bit[6]=1 (upload computed hash to TLR section) */
        sc_buf[1] = SA_UPLOAD_HASH_TO_TLR;
        sc_buf[1] |= ad->auth_ctrl;

        /* Copy the keys or ipad/opad */
        if (ad->keyed_mac) {
                ad->prep_iopad(ad, key, key_sz, ipad, opad);
        } else {
                /* basic hash */
                sc_buf[1] |= SA_BASIC_HASH;
        }
}

static inline void sa_copy_iv(__be32 *out, const u8 *iv, bool size16)
{
        int j;

        for (j = 0; j < ((size16) ? 4 : 2); j++) {
                *out = cpu_to_be32(*((u32 *)iv));
                iv += 4;
                out++;
        }
}
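
/*
 * Illustrative note: sa_copy_iv() stores the IV as big-endian 32-bit
 * words; size16 selects a 16-byte (four word) copy versus an 8-byte one,
 * matching the AES and DES block sizes respectively.
 */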

/* Format general command label */
static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl,
                              struct sa_cmdl_upd_info *upd_info)
{
        u8 enc_offset = 0, auth_offset = 0, total = 0;
        u8 enc_next_eng = SA_ENG_ID_OUTPORT2;
        u8 auth_next_eng = SA_ENG_ID_OUTPORT2;
        u32 *word_ptr = (u32 *)cmdl;
        int i;

        /* Clear the command label */
        memzero_explicit(cmdl, (SA_MAX_CMDL_WORDS * sizeof(u32)));

        /* Initialize the command update structure */
        memzero_explicit(upd_info, sizeof(*upd_info));

        if (cfg->enc_eng_id && cfg->auth_eng_id) {
                if (cfg->enc) {
                        auth_offset = SA_CMDL_HEADER_SIZE_BYTES;
                        enc_next_eng = cfg->auth_eng_id;

                        if (cfg->iv_size)
                                auth_offset += cfg->iv_size;
                } else {
                        enc_offset = SA_CMDL_HEADER_SIZE_BYTES;
                        auth_next_eng = cfg->enc_eng_id;
                }
        }

        if (cfg->enc_eng_id) {
                upd_info->flags |= SA_CMDL_UPD_ENC;
                upd_info->enc_size.index = enc_offset >> 2;
                upd_info->enc_offset.index = upd_info->enc_size.index + 1;
                /* Encryption command label */
                cmdl[enc_offset + SA_CMDL_OFFSET_NESC] = enc_next_eng;

                /* Encryption modes requiring IV */
                if (cfg->iv_size) {
                        upd_info->flags |= SA_CMDL_UPD_ENC_IV;
                        upd_info->enc_iv.index =
                                (enc_offset + SA_CMDL_HEADER_SIZE_BYTES) >> 2;
                        upd_info->enc_iv.size = cfg->iv_size;

                        cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
                                SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;

                        cmdl[enc_offset + SA_CMDL_OFFSET_OPTION_CTRL1] =
                                (SA_CTX_ENC_AUX2_OFFSET | (cfg->iv_size >> 3));
                        total += SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
                } else {
                        cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
                                                SA_CMDL_HEADER_SIZE_BYTES;
                        total += SA_CMDL_HEADER_SIZE_BYTES;
                }
        }

        if (cfg->auth_eng_id) {
                upd_info->flags |= SA_CMDL_UPD_AUTH;
                upd_info->auth_size.index = auth_offset >> 2;
                upd_info->auth_offset.index = upd_info->auth_size.index + 1;
                cmdl[auth_offset + SA_CMDL_OFFSET_NESC] = auth_next_eng;
                cmdl[auth_offset + SA_CMDL_OFFSET_LABEL_LEN] =
                        SA_CMDL_HEADER_SIZE_BYTES;
                total += SA_CMDL_HEADER_SIZE_BYTES;
        }

        total = roundup(total, 8);

        for (i = 0; i < total / 4; i++)
                word_ptr[i] = swab32(word_ptr[i]);

        return total;
}
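
/*
 * Resulting layout, as a hedged sketch (offsets from sa2ul.h): each engine
 * gets an 8-byte command label header, and an IV, when present, follows
 * the encryption header:
 *
 *      encrypt: [enc hdr 8B][enc IV][auth hdr 8B]
 *      decrypt: [auth hdr 8B][enc hdr 8B][enc IV]
 *
 * The total is rounded up to 8 bytes and every 32-bit word is swab32()ed
 * into the byte order the engine fetches.
 */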

/* Update Command label */
static inline void sa_update_cmdl(struct sa_req *req, u32 *cmdl,
                                  struct sa_cmdl_upd_info *upd_info)
{
        int i = 0, j;

        if (likely(upd_info->flags & SA_CMDL_UPD_ENC)) {
                cmdl[upd_info->enc_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
                cmdl[upd_info->enc_size.index] |= req->enc_size;
                cmdl[upd_info->enc_offset.index] &=
                                                ~SA_CMDL_SOP_BYPASS_LEN_MASK;
                cmdl[upd_info->enc_offset.index] |=
                        ((u32)req->enc_offset <<
                         __ffs(SA_CMDL_SOP_BYPASS_LEN_MASK));

                if (likely(upd_info->flags & SA_CMDL_UPD_ENC_IV)) {
                        __be32 *data = (__be32 *)&cmdl[upd_info->enc_iv.index];
                        u32 *enc_iv = (u32 *)req->enc_iv;

                        for (j = 0; i < upd_info->enc_iv.size; i += 4, j++) {
                                data[j] = cpu_to_be32(*enc_iv);
                                enc_iv++;
                        }
                }
        }

        if (likely(upd_info->flags & SA_CMDL_UPD_AUTH)) {
                cmdl[upd_info->auth_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
                cmdl[upd_info->auth_size.index] |= req->auth_size;
                cmdl[upd_info->auth_offset.index] &=
                        ~SA_CMDL_SOP_BYPASS_LEN_MASK;
                cmdl[upd_info->auth_offset.index] |=
                        ((u32)req->auth_offset <<
                         __ffs(SA_CMDL_SOP_BYPASS_LEN_MASK));
                if (upd_info->flags & SA_CMDL_UPD_AUTH_IV) {
                        sa_copy_iv((void *)&cmdl[upd_info->auth_iv.index],
                                   req->auth_iv,
                                   (upd_info->auth_iv.size > 8));
                }
                if (upd_info->flags & SA_CMDL_UPD_AUX_KEY) {
                        int offset = (req->auth_size & 0xF) ? 4 : 0;

                        memcpy(&cmdl[upd_info->aux_key_info.index],
                               &upd_info->aux_key[offset], 16);
                }
        }
}

/* Format SWINFO words to be sent to SA */
static
void sa_set_swinfo(u8 eng_id, u16 sc_id, dma_addr_t sc_phys,
                   u8 cmdl_present, u8 cmdl_offset, u8 flags,
                   u8 hash_size, u32 *swinfo)
{
        swinfo[0] = sc_id;
        swinfo[0] |= (flags << __ffs(SA_SW0_FLAGS_MASK));
        if (likely(cmdl_present))
                swinfo[0] |= ((cmdl_offset | SA_SW0_CMDL_PRESENT) <<
                                                __ffs(SA_SW0_CMDL_INFO_MASK));
        swinfo[0] |= (eng_id << __ffs(SA_SW0_ENG_ID_MASK));

        swinfo[0] |= SA_SW0_DEST_INFO_PRESENT;
        swinfo[1] = (u32)(sc_phys & 0xFFFFFFFFULL);
        swinfo[2] = (u32)((sc_phys & 0xFFFFFFFF00000000ULL) >> 32);
        swinfo[2] |= (hash_size << __ffs(SA_SW2_EGRESS_LENGTH));
}
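
/*
 * SWINFO sketch, per the SA_SW* masks above: word 0 packs the security
 * context ID, the flags (bit 16), the command label info (bit 20), the
 * engine ID (bit 25) and the dest-info-present bit; words 1 and 2 carry
 * the low and high halves of the 64-bit security context address, with
 * the egress hash length folded into the top byte of word 2.
 */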

/* Dump the security context */
static void sa_dump_sc(u8 *buf, dma_addr_t dma_addr)
{
#ifdef DEBUG
        dev_info(sa_k3_dev, "Security context dump:: 0x%pad\n", &dma_addr);
        print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
                       16, 1, buf, SA_CTX_MAX_SZ, false);
#endif
}

static
int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key,
               u16 enc_key_sz, const u8 *auth_key, u16 auth_key_sz,
               struct algo_data *ad, u8 enc, u32 *swinfo)
{
        int enc_sc_offset = 0;
        int auth_sc_offset = 0;
        u8 *sc_buf = ctx->sc;
        u16 sc_id = ctx->sc_id;
        u8 first_engine = 0;

        memzero_explicit(sc_buf, SA_CTX_MAX_SZ);

        if (ad->auth_eng.eng_id) {
                if (enc)
                        first_engine = ad->enc_eng.eng_id;
                else
                        first_engine = ad->auth_eng.eng_id;

                enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
                auth_sc_offset = enc_sc_offset + ad->enc_eng.sc_size;
                sc_buf[1] = SA_SCCTL_FE_AUTH_ENC;
                if (!ad->hash_size)
                        return -EINVAL;
                ad->hash_size = roundup(ad->hash_size, 8);

        } else if (ad->enc_eng.eng_id && !ad->auth_eng.eng_id) {
                enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
                first_engine = ad->enc_eng.eng_id;
                sc_buf[1] = SA_SCCTL_FE_ENC;
                ad->hash_size = ad->iv_out_size;
        }

        /* SCCTL Owner info: 0=host, 1=CP_ACE */
        sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0;
        memcpy(&sc_buf[2], &sc_id, 2);
        sc_buf[4] = 0x0;
        sc_buf[5] = PRIV_ID;
        sc_buf[6] = PRIV;
        sc_buf[7] = 0x0;

        /* Prepare context for encryption engine */
        if (ad->enc_eng.sc_size) {
                if (sa_set_sc_enc(ad, enc_key, enc_key_sz, enc,
                                  &sc_buf[enc_sc_offset]))
                        return -EINVAL;
        }

        /* Prepare context for authentication engine */
        if (ad->auth_eng.sc_size)
                sa_set_sc_auth(ad, auth_key, auth_key_sz,
                               &sc_buf[auth_sc_offset]);

        /* Set the ownership of context to CP_ACE */
        sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0x80;

        /* swizzle the security context */
        sa_swiz_128(sc_buf, SA_CTX_MAX_SZ);

        sa_set_swinfo(first_engine, ctx->sc_id, ctx->sc_phys, 1, 0,
                      SA_SW_INFO_FLAG_EVICT, ad->hash_size, swinfo);

        sa_dump_sc(sc_buf, ctx->sc_phys);

        return 0;
}
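
/*
 * Security context layout built above, as an illustrative sketch:
 *
 *      [SCCTL header: owner, fetch/evict ctrl, sc_id, priv]
 *      [PHP/PE context, SA_CTX_PHP_PE_CTX_SZ bytes]
 *      [encryption context: mode control bytes + (inverse) key]
 *      [authentication context: ctrl word + ipad/opad digests]
 *
 * The whole buffer is then 128-bit swizzled, and its DMA address plus the
 * first engine ID are advertised to the engine through the SWINFO words.
 */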

/* Free the per direction context memory */
static void sa_free_ctx_info(struct sa_ctx_info *ctx,
                             struct sa_crypto_data *data)
{
        unsigned long bn;

        bn = ctx->sc_id - data->sc_id_start;
        spin_lock(&data->scid_lock);
        __clear_bit(bn, data->ctx_bm);
        data->sc_id--;
        spin_unlock(&data->scid_lock);

        if (ctx->sc) {
                dma_pool_free(data->sc_pool, ctx->sc, ctx->sc_phys);
                ctx->sc = NULL;
        }
}

static int sa_init_ctx_info(struct sa_ctx_info *ctx,
                            struct sa_crypto_data *data)
{
        unsigned long bn;
        int err;

        spin_lock(&data->scid_lock);
        bn = find_first_zero_bit(data->ctx_bm, SA_MAX_NUM_CTX);
        __set_bit(bn, data->ctx_bm);
        data->sc_id++;
        spin_unlock(&data->scid_lock);

        ctx->sc_id = (u16)(data->sc_id_start + bn);

        ctx->sc = dma_pool_alloc(data->sc_pool, GFP_KERNEL, &ctx->sc_phys);
        if (!ctx->sc) {
                dev_err(&data->pdev->dev, "Failed to allocate SC memory\n");
                err = -ENOMEM;
                goto scid_rollback;
        }

        return 0;

scid_rollback:
        spin_lock(&data->scid_lock);
        __clear_bit(bn, data->ctx_bm);
        data->sc_id--;
        spin_unlock(&data->scid_lock);

        return err;
}
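
/*
 * Hedged usage sketch: cipher and AEAD transforms take contexts in pairs
 * from the per-device pool, one per direction:
 *
 *      ret = sa_init_ctx_info(&ctx->enc, data);
 *      if (!ret)
 *              ret = sa_init_ctx_info(&ctx->dec, data);
 *
 * sc_id is sc_id_start plus the first free bit in ctx_bm, so IDs stay
 * dense and sa_free_ctx_info() can recover the bit by subtraction.
 */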

static void sa_cipher_cra_exit(struct crypto_skcipher *tfm)
{
        struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);

        dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
                __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
                ctx->dec.sc_id, &ctx->dec.sc_phys);

        sa_free_ctx_info(&ctx->enc, data);
        sa_free_ctx_info(&ctx->dec, data);

        crypto_free_skcipher(ctx->fallback.skcipher);
}

static int sa_cipher_cra_init(struct crypto_skcipher *tfm)
{
        struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
        const char *name = crypto_tfm_alg_name(&tfm->base);
        struct crypto_skcipher *child;
        int ret;

        memzero_explicit(ctx, sizeof(*ctx));
        ctx->dev_data = data;

        ret = sa_init_ctx_info(&ctx->enc, data);
        if (ret)
                return ret;
        ret = sa_init_ctx_info(&ctx->dec, data);
        if (ret) {
                sa_free_ctx_info(&ctx->enc, data);
                return ret;
        }

        child = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);

        if (IS_ERR(child)) {
                dev_err(sa_k3_dev, "Error allocating fallback algo %s\n", name);
                /* don't leak the contexts allocated above */
                sa_free_ctx_info(&ctx->enc, data);
                sa_free_ctx_info(&ctx->dec, data);
                return PTR_ERR(child);
        }

        ctx->fallback.skcipher = child;
        crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
                                         sizeof(struct skcipher_request));

        dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
                __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
                ctx->dec.sc_id, &ctx->dec.sc_phys);
        return 0;
}

static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
                            unsigned int keylen, struct algo_data *ad)
{
        struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_skcipher *child = ctx->fallback.skcipher;
        int cmdl_len;
        struct sa_cmdl_cfg cfg;
        int ret;

        if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
            keylen != AES_KEYSIZE_256)
                return -EINVAL;

        ad->enc_eng.eng_id = SA_ENG_ID_EM1;
        ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;

        memzero_explicit(&cfg, sizeof(cfg));
        cfg.enc_eng_id = ad->enc_eng.eng_id;
        cfg.iv_size = crypto_skcipher_ivsize(tfm);

        crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(child, tfm->base.crt_flags &
                                         CRYPTO_TFM_REQ_MASK);
        ret = crypto_skcipher_setkey(child, key, keylen);
        if (ret)
                return ret;

        /* Setup Encryption Security Context & Command label template */
        if (sa_init_sc(&ctx->enc, key, keylen, NULL, 0, ad, 1,
                       &ctx->enc.epib[1]))
                goto badkey;

        cmdl_len = sa_format_cmdl_gen(&cfg,
                                      (u8 *)ctx->enc.cmdl,
                                      &ctx->enc.cmdl_upd_info);
        if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
                goto badkey;

        ctx->enc.cmdl_size = cmdl_len;

        /* Setup Decryption Security Context & Command label template */
        if (sa_init_sc(&ctx->dec, key, keylen, NULL, 0, ad, 0,
                       &ctx->dec.epib[1]))
                goto badkey;

        cfg.enc_eng_id = ad->enc_eng.eng_id;
        cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
                                      &ctx->dec.cmdl_upd_info);

        if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
                goto badkey;

        ctx->dec.cmdl_size = cmdl_len;
        ctx->iv_idx = ad->iv_idx;

        return 0;

badkey:
        dev_err(sa_k3_dev, "%s: badkey\n", __func__);
        return -EINVAL;
}

static int sa_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
                             unsigned int keylen)
{
        struct algo_data ad = { 0 };
        /* Convert the key size (16/24/32) to the key size index (0/1/2) */
        int key_idx = (keylen >> 3) - 2;

        if (key_idx >= 3)
                return -EINVAL;

        ad.mci_enc = mci_cbc_enc_array[key_idx];
        ad.mci_dec = mci_cbc_dec_array[key_idx];
        ad.inv_key = true;
        ad.ealg_id = SA_EALG_ID_AES_CBC;
        ad.iv_idx = 4;
        ad.iv_out_size = 16;

        return sa_cipher_setkey(tfm, key, keylen, &ad);
}

static int sa_aes_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
                             unsigned int keylen)
{
        struct algo_data ad = { 0 };
        /* Convert the key size (16/24/32) to the key size index (0/1/2) */
        int key_idx = (keylen >> 3) - 2;

        if (key_idx >= 3)
                return -EINVAL;

        ad.mci_enc = mci_ecb_enc_array[key_idx];
        ad.mci_dec = mci_ecb_dec_array[key_idx];
        ad.inv_key = true;
        ad.ealg_id = SA_EALG_ID_AES_ECB;

        return sa_cipher_setkey(tfm, key, keylen, &ad);
}

static int sa_3des_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
                              unsigned int keylen)
{
        struct algo_data ad = { 0 };

        ad.mci_enc = mci_cbc_3des_enc_array;
        ad.mci_dec = mci_cbc_3des_dec_array;
        ad.ealg_id = SA_EALG_ID_3DES_CBC;
        ad.iv_idx = 6;
        ad.iv_out_size = 8;

        return sa_cipher_setkey(tfm, key, keylen, &ad);
}

static int sa_3des_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
                              unsigned int keylen)
{
        struct algo_data ad = { 0 };

        ad.mci_enc = mci_ecb_3des_enc_array;
        ad.mci_dec = mci_ecb_3des_dec_array;

        return sa_cipher_setkey(tfm, key, keylen, &ad);
}

static void sa_sync_from_device(struct sa_rx_data *rxd)
{
        struct sg_table *sgt;

        if (rxd->mapped_sg[0].dir == DMA_BIDIRECTIONAL)
                sgt = &rxd->mapped_sg[0].sgt;
        else
                sgt = &rxd->mapped_sg[1].sgt;

        dma_sync_sgtable_for_cpu(rxd->ddev, sgt, DMA_FROM_DEVICE);
}

static void sa_free_sa_rx_data(struct sa_rx_data *rxd)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(rxd->mapped_sg); i++) {
                struct sa_mapped_sg *mapped_sg = &rxd->mapped_sg[i];

                if (mapped_sg->mapped) {
                        dma_unmap_sgtable(rxd->ddev, &mapped_sg->sgt,
                                          mapped_sg->dir, 0);
                        kfree(mapped_sg->split_sg);
                }
        }

        kfree(rxd);
}

static void sa_aes_dma_in_callback(void *data)
{
        struct sa_rx_data *rxd = (struct sa_rx_data *)data;
        struct skcipher_request *req;
        u32 *result;
        __be32 *mdptr;
        size_t ml, pl;
        int i;

        sa_sync_from_device(rxd);
        req = container_of(rxd->req, struct skcipher_request, base);

        if (req->iv) {
                mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in,
                                                                  &pl, &ml);
                result = (u32 *)req->iv;

                for (i = 0; i < (rxd->enc_iv_size / 4); i++)
                        result[i] = be32_to_cpu(mdptr[i + rxd->iv_idx]);
        }

        sa_free_sa_rx_data(rxd);

        skcipher_request_complete(req, 0);
}

static void
sa_prepare_tx_desc(u32 *mdptr, u32 pslen, u32 *psdata, u32 epiblen, u32 *epib)
{
        u32 *out, *in;
        int i;

        for (out = mdptr, in = epib, i = 0; i < epiblen / sizeof(u32); i++)
                *out++ = *in++;

        mdptr[4] = (0xFFFF << 16);
        for (out = &mdptr[5], in = psdata, i = 0;
             i < pslen / sizeof(u32); i++)
                *out++ = *in++;
}
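
/*
 * Metadata layout written above, sketched: the EPIB words (the SWINFO
 * produced by sa_set_swinfo()) come first, then a fixed marker word, then
 * the PS data (command label plus request type):
 *
 *      mdptr[0..3] = epib[0..3]
 *      mdptr[4]    = 0xFFFF << 16
 *      mdptr[5..]  = psdata[0..pslen/4 - 1]
 */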

static int sa_run(struct sa_req *req)
{
        struct sa_rx_data *rxd;
        gfp_t gfp_flags;
        u32 cmdl[SA_MAX_CMDL_WORDS];
        struct sa_crypto_data *pdata = dev_get_drvdata(sa_k3_dev);
        struct device *ddev;
        struct dma_chan *dma_rx;
        int sg_nents, src_nents, dst_nents;
        struct scatterlist *src, *dst;
        size_t pl, ml, split_size;
        struct sa_ctx_info *sa_ctx = req->enc ? &req->ctx->enc : &req->ctx->dec;
        int ret;
        struct dma_async_tx_descriptor *tx_out;
        u32 *mdptr;
        bool diff_dst;
        enum dma_data_direction dir_src;
        struct sa_mapped_sg *mapped_sg;

        gfp_flags = req->base->flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                GFP_KERNEL : GFP_ATOMIC;

        rxd = kzalloc(sizeof(*rxd), gfp_flags);
        if (!rxd)
                return -ENOMEM;

        if (req->src != req->dst) {
                diff_dst = true;
                dir_src = DMA_TO_DEVICE;
        } else {
                diff_dst = false;
                dir_src = DMA_BIDIRECTIONAL;
        }

        /*
         * SA2UL has an interesting feature where the receive DMA channel
         * is selected based on the data passed to the engine. Within the
         * transition range, there is also a space where it is impossible
         * to determine where the data will end up, and this should be
         * avoided. This will be handled by the SW fallback mechanism by
         * the individual algorithm implementations.
         */
        if (req->size >= 256)
                dma_rx = pdata->dma_rx2;
        else
                dma_rx = pdata->dma_rx1;

        ddev = dmaengine_get_dma_device(pdata->dma_tx);
        rxd->ddev = ddev;

        memcpy(cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size);

        sa_update_cmdl(req, cmdl, &sa_ctx->cmdl_upd_info);

        if (req->type != CRYPTO_ALG_TYPE_AHASH) {
                if (req->enc)
                        req->type |=
                                (SA_REQ_SUBTYPE_ENC << SA_REQ_SUBTYPE_SHIFT);
                else
                        req->type |=
                                (SA_REQ_SUBTYPE_DEC << SA_REQ_SUBTYPE_SHIFT);
        }

        cmdl[sa_ctx->cmdl_size / sizeof(u32)] = req->type;

        /*
         * Map the packets, first we check if the data fits into a single
         * sg entry and use that if possible. If it does not fit, we check
         * if we need to do sg_split to align the scatterlist data on the
         * actual data size being processed by the crypto engine.
         */
        src = req->src;
        sg_nents = sg_nents_for_len(src, req->size);

        split_size = req->size;

        mapped_sg = &rxd->mapped_sg[0];
        if (sg_nents == 1 && split_size <= req->src->length) {
                src = &mapped_sg->static_sg;
                src_nents = 1;
                sg_init_table(src, 1);
                sg_set_page(src, sg_page(req->src), split_size,
                            req->src->offset);

                mapped_sg->sgt.sgl = src;
                mapped_sg->sgt.orig_nents = src_nents;
                ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
                if (ret) {
                        kfree(rxd);
                        return ret;
                }

                mapped_sg->dir = dir_src;
                mapped_sg->mapped = true;
        } else {
                mapped_sg->sgt.sgl = req->src;
                mapped_sg->sgt.orig_nents = sg_nents;
                ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
                if (ret) {
                        kfree(rxd);
                        return ret;
                }

                mapped_sg->dir = dir_src;
                mapped_sg->mapped = true;

                ret = sg_split(mapped_sg->sgt.sgl, mapped_sg->sgt.nents, 0, 1,
                               &split_size, &src, &src_nents, gfp_flags);
                if (ret) {
                        src_nents = mapped_sg->sgt.nents;
                        src = mapped_sg->sgt.sgl;
                } else {
                        mapped_sg->split_sg = src;
                }
        }

        dma_sync_sgtable_for_device(ddev, &mapped_sg->sgt, DMA_TO_DEVICE);

        if (!diff_dst) {
                dst_nents = src_nents;
                dst = src;
        } else {
                dst_nents = sg_nents_for_len(req->dst, req->size);
                mapped_sg = &rxd->mapped_sg[1];

                if (dst_nents == 1 && split_size <= req->dst->length) {
                        dst = &mapped_sg->static_sg;
                        dst_nents = 1;
                        sg_init_table(dst, 1);
                        sg_set_page(dst, sg_page(req->dst), split_size,
                                    req->dst->offset);

                        mapped_sg->sgt.sgl = dst;
                        mapped_sg->sgt.orig_nents = dst_nents;
                        ret = dma_map_sgtable(ddev, &mapped_sg->sgt,
                                              DMA_FROM_DEVICE, 0);
                        if (ret)
                                goto err_cleanup;

                        mapped_sg->dir = DMA_FROM_DEVICE;
                        mapped_sg->mapped = true;
                } else {
                        mapped_sg->sgt.sgl = req->dst;
                        mapped_sg->sgt.orig_nents = dst_nents;
                        ret = dma_map_sgtable(ddev, &mapped_sg->sgt,
                                              DMA_FROM_DEVICE, 0);
                        if (ret)
                                goto err_cleanup;

                        mapped_sg->dir = DMA_FROM_DEVICE;
                        mapped_sg->mapped = true;

                        ret = sg_split(mapped_sg->sgt.sgl, mapped_sg->sgt.nents,
                                       0, 1, &split_size, &dst, &dst_nents,
                                       gfp_flags);
                        if (ret) {
                                dst_nents = mapped_sg->sgt.nents;
                                dst = mapped_sg->sgt.sgl;
                        } else {
                                mapped_sg->split_sg = dst;
                        }
                }
        }

        rxd->tx_in = dmaengine_prep_slave_sg(dma_rx, dst, dst_nents,
                                             DMA_DEV_TO_MEM,
                                             DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!rxd->tx_in) {
                dev_err(pdata->dev, "IN prep_slave_sg() failed\n");
                ret = -EINVAL;
                goto err_cleanup;
        }

        rxd->req = (void *)req->base;
        rxd->enc = req->enc;
        rxd->iv_idx = req->ctx->iv_idx;
        rxd->enc_iv_size = sa_ctx->cmdl_upd_info.enc_iv.size;
        rxd->tx_in->callback = req->callback;
        rxd->tx_in->callback_param = rxd;

        tx_out = dmaengine_prep_slave_sg(pdata->dma_tx, src,
                                         src_nents, DMA_MEM_TO_DEV,
                                         DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

        if (!tx_out) {
                dev_err(pdata->dev, "OUT prep_slave_sg() failed\n");
                ret = -EINVAL;
                goto err_cleanup;
        }

        /*
         * Prepare metadata for DMA engine. This essentially describes the
         * crypto algorithm to be used, data sizes, different keys etc.
         */
        mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(tx_out, &pl, &ml);

        sa_prepare_tx_desc(mdptr, (sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS *
                                   sizeof(u32))), cmdl, sizeof(sa_ctx->epib),
                           sa_ctx->epib);

        ml = sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS * sizeof(u32));
        dmaengine_desc_set_metadata_len(tx_out, req->mdata_size);

        dmaengine_submit(tx_out);
        dmaengine_submit(rxd->tx_in);

        dma_async_issue_pending(dma_rx);
        dma_async_issue_pending(pdata->dma_tx);

        return -EINPROGRESS;

err_cleanup:
        sa_free_sa_rx_data(rxd);

        return ret;
}
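
/*
 * Note on the channel split above: requests of 256 bytes and up are
 * received on dma_rx2, smaller ones on dma_rx1. Sizes inside the
 * ambiguous window (SA_UNSAFE_DATA_SZ_MIN..SA_UNSAFE_DATA_SZ_MAX) never
 * reach sa_run(); the callers below route those to the software fallback.
 */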

static int sa_cipher_run(struct skcipher_request *req, u8 *iv, int enc)
{
        struct sa_tfm_ctx *ctx =
            crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
        struct crypto_alg *alg = req->base.tfm->__crt_alg;
        struct sa_req sa_req = { 0 };

        if (!req->cryptlen)
                return 0;

        if (req->cryptlen % alg->cra_blocksize)
                return -EINVAL;

        /* Use SW fallback if the data size is not supported */
        if (req->cryptlen > SA_MAX_DATA_SZ ||
            (req->cryptlen >= SA_UNSAFE_DATA_SZ_MIN &&
             req->cryptlen <= SA_UNSAFE_DATA_SZ_MAX)) {
                struct skcipher_request *subreq = skcipher_request_ctx(req);

                skcipher_request_set_tfm(subreq, ctx->fallback.skcipher);
                skcipher_request_set_callback(subreq, req->base.flags,
                                              req->base.complete,
                                              req->base.data);
                skcipher_request_set_crypt(subreq, req->src, req->dst,
                                           req->cryptlen, req->iv);
                if (enc)
                        return crypto_skcipher_encrypt(subreq);
                else
                        return crypto_skcipher_decrypt(subreq);
        }

        sa_req.size = req->cryptlen;
        sa_req.enc_size = req->cryptlen;
        sa_req.src = req->src;
        sa_req.dst = req->dst;
        sa_req.enc_iv = iv;
        sa_req.type = CRYPTO_ALG_TYPE_SKCIPHER;
        sa_req.enc = enc;
        sa_req.callback = sa_aes_dma_in_callback;
        sa_req.mdata_size = 44;
        sa_req.base = &req->base;
        sa_req.ctx = ctx;

        return sa_run(&sa_req);
}

static int sa_encrypt(struct skcipher_request *req)
{
        return sa_cipher_run(req, req->iv, 1);
}

static int sa_decrypt(struct skcipher_request *req)
{
        return sa_cipher_run(req, req->iv, 0);
}

static void sa_sha_dma_in_callback(void *data)
{
        struct sa_rx_data *rxd = (struct sa_rx_data *)data;
        struct ahash_request *req;
        struct crypto_ahash *tfm;
        unsigned int authsize;
        int i;
        size_t ml, pl;
        u32 *result;
        __be32 *mdptr;

        sa_sync_from_device(rxd);
        req = container_of(rxd->req, struct ahash_request, base);
        tfm = crypto_ahash_reqtfm(req);
        authsize = crypto_ahash_digestsize(tfm);

        mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
        result = (u32 *)req->result;

        for (i = 0; i < (authsize / 4); i++)
                result[i] = be32_to_cpu(mdptr[i + 4]);

        sa_free_sa_rx_data(rxd);

        ahash_request_complete(req, 0);
}

static int zero_message_process(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        int sa_digest_size = crypto_ahash_digestsize(tfm);

        switch (sa_digest_size) {
        case SHA1_DIGEST_SIZE:
                memcpy(req->result, sha1_zero_message_hash, sa_digest_size);
                break;
        case SHA256_DIGEST_SIZE:
                memcpy(req->result, sha256_zero_message_hash, sa_digest_size);
                break;
        case SHA512_DIGEST_SIZE:
                memcpy(req->result, sha512_zero_message_hash, sa_digest_size);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
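
/*
 * Illustrative note: an empty message needs no hardware pass; the kernel
 * exports the well-known empty-message digests directly, e.g.
 * SHA-1("") = da39a3ee 5e6b4b0d 3255bfef 95601890 afd80709
 * (sha1_zero_message_hash).
 */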

static int sa_sha_run(struct ahash_request *req)
{
        struct sa_tfm_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
        struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
        struct sa_req sa_req = { 0 };
        size_t auth_len;

        auth_len = req->nbytes;

        if (!auth_len)
                return zero_message_process(req);

        if (auth_len > SA_MAX_DATA_SZ ||
            (auth_len >= SA_UNSAFE_DATA_SZ_MIN &&
             auth_len <= SA_UNSAFE_DATA_SZ_MAX)) {
                struct ahash_request *subreq = &rctx->fallback_req;
                int ret = 0;

                ahash_request_set_tfm(subreq, ctx->fallback.ahash);
                subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

                crypto_ahash_init(subreq);

                subreq->nbytes = auth_len;
                subreq->src = req->src;
                subreq->result = req->result;

                ret |= crypto_ahash_update(subreq);

                subreq->nbytes = 0;

                ret |= crypto_ahash_final(subreq);

                return ret;
        }

        sa_req.size = auth_len;
        sa_req.auth_size = auth_len;
        sa_req.src = req->src;
        sa_req.dst = req->src;
        sa_req.enc = true;
        sa_req.type = CRYPTO_ALG_TYPE_AHASH;
        sa_req.callback = sa_sha_dma_in_callback;
        sa_req.mdata_size = 28;
        sa_req.ctx = ctx;
        sa_req.base = &req->base;

        return sa_run(&sa_req);
}

static int sa_sha_setup(struct sa_tfm_ctx *ctx, struct algo_data *ad)
{
        int bs = crypto_shash_blocksize(ctx->shash);
        int cmdl_len;
        struct sa_cmdl_cfg cfg;

        ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
        ad->auth_eng.eng_id = SA_ENG_ID_AM1;
        ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;

        memset(ctx->authkey, 0, bs);
        memset(&cfg, 0, sizeof(cfg));
        cfg.aalg = ad->aalg_id;
        cfg.enc_eng_id = ad->enc_eng.eng_id;
        cfg.auth_eng_id = ad->auth_eng.eng_id;
        cfg.iv_size = 0;
        cfg.akey = NULL;
        cfg.akey_len = 0;

        /* Setup Encryption Security Context & Command label template */
        if (sa_init_sc(&ctx->enc, NULL, 0, NULL, 0, ad, 0,
                       &ctx->enc.epib[1]))
                goto badkey;

        cmdl_len = sa_format_cmdl_gen(&cfg,
                                      (u8 *)ctx->enc.cmdl,
                                      &ctx->enc.cmdl_upd_info);
        if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
                goto badkey;

        ctx->enc.cmdl_size = cmdl_len;

        return 0;

badkey:
        dev_err(sa_k3_dev, "%s: badkey\n", __func__);
        return -EINVAL;
}
1472
1473 static int sa_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
1474 {
1475         struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1476         struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1477         int ret;
1478
1479         memset(ctx, 0, sizeof(*ctx));
1480         ctx->dev_data = data;
1481         ret = sa_init_ctx_info(&ctx->enc, data);
1482         if (ret)
1483                 return ret;
1484
        if (alg_base) {
                ctx->shash = crypto_alloc_shash(alg_base, 0,
                                                CRYPTO_ALG_NEED_FALLBACK);
                if (IS_ERR(ctx->shash)) {
                        dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n",
                                alg_base);
                        sa_free_ctx_info(&ctx->enc, data);
                        return PTR_ERR(ctx->shash);
                }
                /* for fallback */
                ctx->fallback.ahash =
                        crypto_alloc_ahash(alg_base, 0,
                                           CRYPTO_ALG_NEED_FALLBACK);
                if (IS_ERR(ctx->fallback.ahash)) {
                        dev_err(ctx->dev_data->dev,
                                "Could not load fallback driver\n");
                        crypto_free_shash(ctx->shash);
                        sa_free_ctx_info(&ctx->enc, data);
                        return PTR_ERR(ctx->fallback.ahash);
                }
        }
1503
1504         dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1505                 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1506                 ctx->dec.sc_id, &ctx->dec.sc_phys);
1507
1508         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1509                                  sizeof(struct sa_sha_req_ctx) +
1510                                  crypto_ahash_reqsize(ctx->fallback.ahash));
1511
1512         return 0;
1513 }
1514
1515 static int sa_sha_digest(struct ahash_request *req)
1516 {
1517         return sa_sha_run(req);
1518 }
1519
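/*
 * Incremental operations (init/update/final/finup and import/export) are
 * always delegated to the software fallback; the hardware is only used for
 * one-shot digests via sa_sha_digest().
 */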
1520 static int sa_sha_init(struct ahash_request *req)
1521 {
1522         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1523         struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1524         struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1525
1526         dev_dbg(sa_k3_dev, "init: digest size: %u, rctx=%p\n",
1527                 crypto_ahash_digestsize(tfm), rctx);
1528
1529         ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1530         rctx->fallback_req.base.flags =
1531                 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1532
1533         return crypto_ahash_init(&rctx->fallback_req);
1534 }
1535
1536 static int sa_sha_update(struct ahash_request *req)
1537 {
1538         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1539         struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1540         struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1541
1542         ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1543         rctx->fallback_req.base.flags =
1544                 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1545         rctx->fallback_req.nbytes = req->nbytes;
1546         rctx->fallback_req.src = req->src;
1547
1548         return crypto_ahash_update(&rctx->fallback_req);
1549 }
1550
1551 static int sa_sha_final(struct ahash_request *req)
1552 {
1553         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1554         struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1555         struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1556
1557         ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1558         rctx->fallback_req.base.flags =
1559                 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1560         rctx->fallback_req.result = req->result;
1561
1562         return crypto_ahash_final(&rctx->fallback_req);
1563 }
1564
1565 static int sa_sha_finup(struct ahash_request *req)
1566 {
1567         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1568         struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1569         struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1570
1571         ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1572         rctx->fallback_req.base.flags =
1573                 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1574
1575         rctx->fallback_req.nbytes = req->nbytes;
1576         rctx->fallback_req.src = req->src;
1577         rctx->fallback_req.result = req->result;
1578
1579         return crypto_ahash_finup(&rctx->fallback_req);
1580 }
1581
1582 static int sa_sha_import(struct ahash_request *req, const void *in)
1583 {
1584         struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1585         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1586         struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1587
1588         ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1589         rctx->fallback_req.base.flags = req->base.flags &
1590                 CRYPTO_TFM_REQ_MAY_SLEEP;
1591
1592         return crypto_ahash_import(&rctx->fallback_req, in);
1593 }
1594
1595 static int sa_sha_export(struct ahash_request *req, void *out)
1596 {
1597         struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1598         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1599         struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1600         struct ahash_request *subreq = &rctx->fallback_req;
1601
1602         ahash_request_set_tfm(subreq, ctx->fallback.ahash);
1603         subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1604
1605         return crypto_ahash_export(subreq, out);
1606 }
1607
1608 static int sa_sha1_cra_init(struct crypto_tfm *tfm)
1609 {
1610         struct algo_data ad = { 0 };
        struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
        int ret;

        ret = sa_sha_cra_init_alg(tfm, "sha1");
        if (ret)
                return ret;

        ad.aalg_id = SA_AALG_ID_SHA1;
        ad.hash_size = SHA1_DIGEST_SIZE;
        ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1;

        return sa_sha_setup(ctx, &ad);
1622 }
1623
1624 static int sa_sha256_cra_init(struct crypto_tfm *tfm)
1625 {
1626         struct algo_data ad = { 0 };
        struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
        int ret;

        ret = sa_sha_cra_init_alg(tfm, "sha256");
        if (ret)
                return ret;

        ad.aalg_id = SA_AALG_ID_SHA2_256;
        ad.hash_size = SHA256_DIGEST_SIZE;
        ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256;

        return sa_sha_setup(ctx, &ad);
1638 }
1639
1640 static int sa_sha512_cra_init(struct crypto_tfm *tfm)
1641 {
1642         struct algo_data ad = { 0 };
        struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
        int ret;

        ret = sa_sha_cra_init_alg(tfm, "sha512");
        if (ret)
                return ret;

        ad.aalg_id = SA_AALG_ID_SHA2_512;
        ad.hash_size = SHA512_DIGEST_SIZE;
        ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA512;

        return sa_sha_setup(ctx, &ad);
1654 }
1655
1656 static void sa_sha_cra_exit(struct crypto_tfm *tfm)
1657 {
1658         struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1659         struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1660
1661         dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1662                 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1663                 ctx->dec.sc_id, &ctx->dec.sc_phys);
1664
1665         if (crypto_tfm_alg_type(tfm) == CRYPTO_ALG_TYPE_AHASH)
1666                 sa_free_ctx_info(&ctx->enc, data);
1667
1668         crypto_free_shash(ctx->shash);
1669         crypto_free_ahash(ctx->fallback.ahash);
1670 }
1671
1672 static void sa_aead_dma_in_callback(void *data)
1673 {
1674         struct sa_rx_data *rxd = (struct sa_rx_data *)data;
1675         struct aead_request *req;
1676         struct crypto_aead *tfm;
1677         unsigned int start;
1678         unsigned int authsize;
1679         u8 auth_tag[SA_MAX_AUTH_TAG_SZ];
1680         size_t pl, ml;
1681         int i;
1682         int err = 0;
1684         u32 *mdptr;
1685
1686         sa_sync_from_device(rxd);
1687         req = container_of(rxd->req, struct aead_request, base);
1688         tfm = crypto_aead_reqtfm(req);
1689         start = req->assoclen + req->cryptlen;
1690         authsize = crypto_aead_authsize(tfm);
1691
1692         mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
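        /* The hardware returns the tag with each 32-bit word byte-swapped. */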
1693         for (i = 0; i < (authsize / 4); i++)
1694                 mdptr[i + 4] = swab32(mdptr[i + 4]);
1695
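        /*
         * Encryption: append the tag computed by the hardware to the
         * destination. Decryption: compare it against the tag stored at
         * the end of the source buffer.
         */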
        if (rxd->enc) {
                scatterwalk_map_and_copy(&mdptr[4], req->dst, start, authsize,
                                         1);
        } else {
                start -= authsize;
                scatterwalk_map_and_copy(auth_tag, req->src, start, authsize,
                                         0);

                err = crypto_memneq(&mdptr[4], auth_tag, authsize) ?
                        -EBADMSG : 0;
        }
1709
1710         sa_free_sa_rx_data(rxd);
1711
1712         aead_request_complete(req, err);
1713 }
1714
1715 static int sa_cra_init_aead(struct crypto_aead *tfm, const char *hash,
1716                             const char *fallback)
1717 {
1718         struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1719         struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1720         int ret;
1721
1722         memzero_explicit(ctx, sizeof(*ctx));
1723
1724         ctx->shash = crypto_alloc_shash(hash, 0, CRYPTO_ALG_NEED_FALLBACK);
1725         if (IS_ERR(ctx->shash)) {
1726                 dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n", hash);
1727                 return PTR_ERR(ctx->shash);
1728         }
1729
        ctx->fallback.aead = crypto_alloc_aead(fallback, 0,
                                               CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ctx->fallback.aead)) {
                dev_err(sa_k3_dev, "fallback driver %s couldn't be loaded\n",
                        fallback);
                crypto_free_shash(ctx->shash);
                return PTR_ERR(ctx->fallback.aead);
        }
1738
1739         crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
1740                                 crypto_aead_reqsize(ctx->fallback.aead));
1741
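        /* AEAD needs separate security contexts for encryption and decryption. */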
1742         ret = sa_init_ctx_info(&ctx->enc, data);
1743         if (ret)
1744                 return ret;
1745
1746         ret = sa_init_ctx_info(&ctx->dec, data);
1747         if (ret) {
1748                 sa_free_ctx_info(&ctx->enc, data);
1749                 return ret;
1750         }
1751
1752         dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1753                 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1754                 ctx->dec.sc_id, &ctx->dec.sc_phys);
1755
1756         return ret;
1757 }
1758
1759 static int sa_cra_init_aead_sha1(struct crypto_aead *tfm)
1760 {
1761         return sa_cra_init_aead(tfm, "sha1",
1762                                 "authenc(hmac(sha1-ce),cbc(aes-ce))");
1763 }
1764
1765 static int sa_cra_init_aead_sha256(struct crypto_aead *tfm)
1766 {
1767         return sa_cra_init_aead(tfm, "sha256",
1768                                 "authenc(hmac(sha256-ce),cbc(aes-ce))");
1769 }
1770
1771 static void sa_exit_tfm_aead(struct crypto_aead *tfm)
1772 {
1773         struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1774         struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1775
1776         crypto_free_shash(ctx->shash);
1777         crypto_free_aead(ctx->fallback.aead);
1778
1779         sa_free_ctx_info(&ctx->enc, data);
1780         sa_free_ctx_info(&ctx->dec, data);
1781 }
1782
1783 /* AEAD algorithm configuration interface function */
1784 static int sa_aead_setkey(struct crypto_aead *authenc,
1785                           const u8 *key, unsigned int keylen,
1786                           struct algo_data *ad)
1787 {
1788         struct sa_tfm_ctx *ctx = crypto_aead_ctx(authenc);
1789         struct crypto_authenc_keys keys;
1790         int cmdl_len;
1791         struct sa_cmdl_cfg cfg;
1792         int key_idx;
1793
1794         if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
1795                 return -EINVAL;
1796
1797         /* Convert the key size (16/24/32) to the key size index (0/1/2) */
1798         key_idx = (keys.enckeylen >> 3) - 2;
        if (key_idx < 0 || key_idx >= 3)
1800                 return -EINVAL;
1801
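        /*
         * AES-CBC on the encryption engine, a keyed (HMAC) hash on the
         * authentication engine.
         */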
1802         ad->ctx = ctx;
1803         ad->enc_eng.eng_id = SA_ENG_ID_EM1;
1804         ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
1805         ad->auth_eng.eng_id = SA_ENG_ID_AM1;
1806         ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
1807         ad->mci_enc = mci_cbc_enc_no_iv_array[key_idx];
1808         ad->mci_dec = mci_cbc_dec_no_iv_array[key_idx];
1809         ad->inv_key = true;
1810         ad->keyed_mac = true;
1811         ad->ealg_id = SA_EALG_ID_AES_CBC;
1812         ad->prep_iopad = sa_prepare_iopads;
1813
1814         memset(&cfg, 0, sizeof(cfg));
1815         cfg.enc = true;
1816         cfg.aalg = ad->aalg_id;
1817         cfg.enc_eng_id = ad->enc_eng.eng_id;
1818         cfg.auth_eng_id = ad->auth_eng.eng_id;
1819         cfg.iv_size = crypto_aead_ivsize(authenc);
1820         cfg.akey = keys.authkey;
1821         cfg.akey_len = keys.authkeylen;
1822
1823         /* Setup Encryption Security Context & Command label template */
1824         if (sa_init_sc(&ctx->enc, keys.enckey, keys.enckeylen,
1825                        keys.authkey, keys.authkeylen,
1826                        ad, 1, &ctx->enc.epib[1]))
1827                 return -EINVAL;
1828
1829         cmdl_len = sa_format_cmdl_gen(&cfg,
1830                                       (u8 *)ctx->enc.cmdl,
1831                                       &ctx->enc.cmdl_upd_info);
1832         if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
1833                 return -EINVAL;
1834
1835         ctx->enc.cmdl_size = cmdl_len;
1836
1837         /* Setup Decryption Security Context & Command label template */
1838         if (sa_init_sc(&ctx->dec, keys.enckey, keys.enckeylen,
1839                        keys.authkey, keys.authkeylen,
1840                        ad, 0, &ctx->dec.epib[1]))
1841                 return -EINVAL;
1842
1843         cfg.enc = false;
1844         cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
1845                                       &ctx->dec.cmdl_upd_info);
1846
1847         if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
1848                 return -EINVAL;
1849
1850         ctx->dec.cmdl_size = cmdl_len;
1851
1852         crypto_aead_clear_flags(ctx->fallback.aead, CRYPTO_TFM_REQ_MASK);
1853         crypto_aead_set_flags(ctx->fallback.aead,
1854                               crypto_aead_get_flags(authenc) &
1855                               CRYPTO_TFM_REQ_MASK);
        return crypto_aead_setkey(ctx->fallback.aead, key, keylen);
1859 }
1860
1861 static int sa_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
1862 {
1863         struct sa_tfm_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));
1864
1865         return crypto_aead_setauthsize(ctx->fallback.aead, authsize);
1866 }
1867
1868 static int sa_aead_cbc_sha1_setkey(struct crypto_aead *authenc,
1869                                    const u8 *key, unsigned int keylen)
1870 {
1871         struct algo_data ad = { 0 };
1872
1873         ad.ealg_id = SA_EALG_ID_AES_CBC;
1874         ad.aalg_id = SA_AALG_ID_HMAC_SHA1;
1875         ad.hash_size = SHA1_DIGEST_SIZE;
1876         ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1;
1877
1878         return sa_aead_setkey(authenc, key, keylen, &ad);
1879 }
1880
1881 static int sa_aead_cbc_sha256_setkey(struct crypto_aead *authenc,
1882                                      const u8 *key, unsigned int keylen)
1883 {
1884         struct algo_data ad = { 0 };
1885
1886         ad.ealg_id = SA_EALG_ID_AES_CBC;
1887         ad.aalg_id = SA_AALG_ID_HMAC_SHA2_256;
1888         ad.hash_size = SHA256_DIGEST_SIZE;
1889         ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256;
1890
1891         return sa_aead_setkey(authenc, key, keylen, &ad);
1892 }
1893
1894 static int sa_aead_run(struct aead_request *req, u8 *iv, int enc)
1895 {
1896         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1897         struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1898         struct sa_req sa_req = { 0 };
1899         size_t auth_size, enc_size;
1900
1901         enc_size = req->cryptlen;
1902         auth_size = req->assoclen + req->cryptlen;
1903
1904         if (!enc) {
1905                 enc_size -= crypto_aead_authsize(tfm);
1906                 auth_size -= crypto_aead_authsize(tfm);
1907         }
1908
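        /*
         * Hand sizes the hardware cannot process safely to the software
         * fallback AEAD implementation.
         */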
1909         if (auth_size > SA_MAX_DATA_SZ ||
1910             (auth_size >= SA_UNSAFE_DATA_SZ_MIN &&
1911              auth_size <= SA_UNSAFE_DATA_SZ_MAX)) {
1912                 struct aead_request *subreq = aead_request_ctx(req);
1913                 int ret;
1914
1915                 aead_request_set_tfm(subreq, ctx->fallback.aead);
1916                 aead_request_set_callback(subreq, req->base.flags,
1917                                           req->base.complete, req->base.data);
1918                 aead_request_set_crypt(subreq, req->src, req->dst,
1919                                        req->cryptlen, req->iv);
1920                 aead_request_set_ad(subreq, req->assoclen);
1921
1922                 ret = enc ? crypto_aead_encrypt(subreq) :
1923                         crypto_aead_decrypt(subreq);
1924                 return ret;
1925         }
1926
1927         sa_req.enc_offset = req->assoclen;
1928         sa_req.enc_size = enc_size;
1929         sa_req.auth_size = auth_size;
1930         sa_req.size = auth_size;
1931         sa_req.enc_iv = iv;
1932         sa_req.type = CRYPTO_ALG_TYPE_AEAD;
1933         sa_req.enc = enc;
1934         sa_req.callback = sa_aead_dma_in_callback;
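        /* Metadata bytes reserved in the DMA descriptor for an AEAD request. */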
1935         sa_req.mdata_size = 52;
1936         sa_req.base = &req->base;
1937         sa_req.ctx = ctx;
1938         sa_req.src = req->src;
1939         sa_req.dst = req->dst;
1940
1941         return sa_run(&sa_req);
1942 }
1943
1944 /* AEAD algorithm encrypt interface function */
1945 static int sa_aead_encrypt(struct aead_request *req)
1946 {
1947         return sa_aead_run(req, req->iv, 1);
1948 }
1949
1950 /* AEAD algorithm decrypt interface function */
1951 static int sa_aead_decrypt(struct aead_request *req)
1952 {
1953         return sa_aead_run(req, req->iv, 0);
1954 }
1955
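/* Algorithm templates registered with the crypto framework at probe time. */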
1956 static struct sa_alg_tmpl sa_algs[] = {
1957         {
1958                 .type = CRYPTO_ALG_TYPE_SKCIPHER,
1959                 .alg.skcipher = {
1960                         .base.cra_name          = "cbc(aes)",
1961                         .base.cra_driver_name   = "cbc-aes-sa2ul",
1962                         .base.cra_priority      = 30000,
1963                         .base.cra_flags         = CRYPTO_ALG_TYPE_SKCIPHER |
1964                                                   CRYPTO_ALG_KERN_DRIVER_ONLY |
1965                                                   CRYPTO_ALG_ASYNC |
1966                                                   CRYPTO_ALG_NEED_FALLBACK,
1967                         .base.cra_blocksize     = AES_BLOCK_SIZE,
1968                         .base.cra_ctxsize       = sizeof(struct sa_tfm_ctx),
1969                         .base.cra_module        = THIS_MODULE,
1970                         .init                   = sa_cipher_cra_init,
1971                         .exit                   = sa_cipher_cra_exit,
1972                         .min_keysize            = AES_MIN_KEY_SIZE,
1973                         .max_keysize            = AES_MAX_KEY_SIZE,
1974                         .ivsize                 = AES_BLOCK_SIZE,
1975                         .setkey                 = sa_aes_cbc_setkey,
1976                         .encrypt                = sa_encrypt,
1977                         .decrypt                = sa_decrypt,
1978                 }
1979         },
1980         {
1981                 .type = CRYPTO_ALG_TYPE_SKCIPHER,
1982                 .alg.skcipher = {
1983                         .base.cra_name          = "ecb(aes)",
1984                         .base.cra_driver_name   = "ecb-aes-sa2ul",
1985                         .base.cra_priority      = 30000,
1986                         .base.cra_flags         = CRYPTO_ALG_TYPE_SKCIPHER |
1987                                                   CRYPTO_ALG_KERN_DRIVER_ONLY |
1988                                                   CRYPTO_ALG_ASYNC |
1989                                                   CRYPTO_ALG_NEED_FALLBACK,
1990                         .base.cra_blocksize     = AES_BLOCK_SIZE,
1991                         .base.cra_ctxsize       = sizeof(struct sa_tfm_ctx),
1992                         .base.cra_module        = THIS_MODULE,
1993                         .init                   = sa_cipher_cra_init,
1994                         .exit                   = sa_cipher_cra_exit,
1995                         .min_keysize            = AES_MIN_KEY_SIZE,
1996                         .max_keysize            = AES_MAX_KEY_SIZE,
1997                         .setkey                 = sa_aes_ecb_setkey,
1998                         .encrypt                = sa_encrypt,
1999                         .decrypt                = sa_decrypt,
2000                 }
2001         },
2002         {
2003                 .type = CRYPTO_ALG_TYPE_SKCIPHER,
2004                 .alg.skcipher = {
2005                         .base.cra_name          = "cbc(des3_ede)",
2006                         .base.cra_driver_name   = "cbc-des3-sa2ul",
2007                         .base.cra_priority      = 30000,
2008                         .base.cra_flags         = CRYPTO_ALG_TYPE_SKCIPHER |
2009                                                   CRYPTO_ALG_KERN_DRIVER_ONLY |
2010                                                   CRYPTO_ALG_ASYNC |
2011                                                   CRYPTO_ALG_NEED_FALLBACK,
2012                         .base.cra_blocksize     = DES_BLOCK_SIZE,
2013                         .base.cra_ctxsize       = sizeof(struct sa_tfm_ctx),
2014                         .base.cra_module        = THIS_MODULE,
2015                         .init                   = sa_cipher_cra_init,
2016                         .exit                   = sa_cipher_cra_exit,
2017                         .min_keysize            = 3 * DES_KEY_SIZE,
2018                         .max_keysize            = 3 * DES_KEY_SIZE,
2019                         .ivsize                 = DES_BLOCK_SIZE,
2020                         .setkey                 = sa_3des_cbc_setkey,
2021                         .encrypt                = sa_encrypt,
2022                         .decrypt                = sa_decrypt,
2023                 }
2024         },
2025         {
2026                 .type = CRYPTO_ALG_TYPE_SKCIPHER,
2027                 .alg.skcipher = {
2028                         .base.cra_name          = "ecb(des3_ede)",
2029                         .base.cra_driver_name   = "ecb-des3-sa2ul",
2030                         .base.cra_priority      = 30000,
2031                         .base.cra_flags         = CRYPTO_ALG_TYPE_SKCIPHER |
2032                                                   CRYPTO_ALG_KERN_DRIVER_ONLY |
2033                                                   CRYPTO_ALG_ASYNC |
2034                                                   CRYPTO_ALG_NEED_FALLBACK,
2035                         .base.cra_blocksize     = DES_BLOCK_SIZE,
2036                         .base.cra_ctxsize       = sizeof(struct sa_tfm_ctx),
2037                         .base.cra_module        = THIS_MODULE,
2038                         .init                   = sa_cipher_cra_init,
2039                         .exit                   = sa_cipher_cra_exit,
2040                         .min_keysize            = 3 * DES_KEY_SIZE,
2041                         .max_keysize            = 3 * DES_KEY_SIZE,
2042                         .setkey                 = sa_3des_ecb_setkey,
2043                         .encrypt                = sa_encrypt,
2044                         .decrypt                = sa_decrypt,
2045                 }
2046         },
2047         {
2048                 .type = CRYPTO_ALG_TYPE_AHASH,
2049                 .alg.ahash = {
2050                         .halg.base = {
2051                                 .cra_name       = "sha1",
2052                                 .cra_driver_name        = "sha1-sa2ul",
2053                                 .cra_priority   = 400,
2054                                 .cra_flags      = CRYPTO_ALG_TYPE_AHASH |
2055                                                   CRYPTO_ALG_ASYNC |
2056                                                   CRYPTO_ALG_KERN_DRIVER_ONLY |
2057                                                   CRYPTO_ALG_NEED_FALLBACK,
2058                                 .cra_blocksize  = SHA1_BLOCK_SIZE,
2059                                 .cra_ctxsize    = sizeof(struct sa_tfm_ctx),
2060                                 .cra_module     = THIS_MODULE,
2061                                 .cra_init       = sa_sha1_cra_init,
2062                                 .cra_exit       = sa_sha_cra_exit,
2063                         },
2064                         .halg.digestsize        = SHA1_DIGEST_SIZE,
2065                         .halg.statesize         = sizeof(struct sa_sha_req_ctx) +
2066                                                   sizeof(struct sha1_state),
2067                         .init                   = sa_sha_init,
2068                         .update                 = sa_sha_update,
2069                         .final                  = sa_sha_final,
2070                         .finup                  = sa_sha_finup,
2071                         .digest                 = sa_sha_digest,
2072                         .export                 = sa_sha_export,
2073                         .import                 = sa_sha_import,
2074                 },
2075         },
2076         {
2077                 .type = CRYPTO_ALG_TYPE_AHASH,
2078                 .alg.ahash = {
2079                         .halg.base = {
2080                                 .cra_name       = "sha256",
2081                                 .cra_driver_name        = "sha256-sa2ul",
2082                                 .cra_priority   = 400,
2083                                 .cra_flags      = CRYPTO_ALG_TYPE_AHASH |
2084                                                   CRYPTO_ALG_ASYNC |
2085                                                   CRYPTO_ALG_KERN_DRIVER_ONLY |
2086                                                   CRYPTO_ALG_NEED_FALLBACK,
2087                                 .cra_blocksize  = SHA256_BLOCK_SIZE,
2088                                 .cra_ctxsize    = sizeof(struct sa_tfm_ctx),
2089                                 .cra_module     = THIS_MODULE,
2090                                 .cra_init       = sa_sha256_cra_init,
2091                                 .cra_exit       = sa_sha_cra_exit,
2092                         },
2093                         .halg.digestsize        = SHA256_DIGEST_SIZE,
2094                         .halg.statesize         = sizeof(struct sa_sha_req_ctx) +
2095                                                   sizeof(struct sha256_state),
2096                         .init                   = sa_sha_init,
2097                         .update                 = sa_sha_update,
2098                         .final                  = sa_sha_final,
2099                         .finup                  = sa_sha_finup,
2100                         .digest                 = sa_sha_digest,
2101                         .export                 = sa_sha_export,
2102                         .import                 = sa_sha_import,
2103                 },
2104         },
2105         {
2106                 .type = CRYPTO_ALG_TYPE_AHASH,
2107                 .alg.ahash = {
2108                         .halg.base = {
2109                                 .cra_name       = "sha512",
2110                                 .cra_driver_name        = "sha512-sa2ul",
2111                                 .cra_priority   = 400,
2112                                 .cra_flags      = CRYPTO_ALG_TYPE_AHASH |
2113                                                   CRYPTO_ALG_ASYNC |
2114                                                   CRYPTO_ALG_KERN_DRIVER_ONLY |
2115                                                   CRYPTO_ALG_NEED_FALLBACK,
2116                                 .cra_blocksize  = SHA512_BLOCK_SIZE,
2117                                 .cra_ctxsize    = sizeof(struct sa_tfm_ctx),
2118                                 .cra_module     = THIS_MODULE,
2119                                 .cra_init       = sa_sha512_cra_init,
2120                                 .cra_exit       = sa_sha_cra_exit,
2121                         },
2122                         .halg.digestsize        = SHA512_DIGEST_SIZE,
2123                         .halg.statesize         = sizeof(struct sa_sha_req_ctx) +
2124                                                   sizeof(struct sha512_state),
2125                         .init                   = sa_sha_init,
2126                         .update                 = sa_sha_update,
2127                         .final                  = sa_sha_final,
2128                         .finup                  = sa_sha_finup,
2129                         .digest                 = sa_sha_digest,
2130                         .export                 = sa_sha_export,
2131                         .import                 = sa_sha_import,
2132                 },
2133         },
2134         {
2135                 .type   = CRYPTO_ALG_TYPE_AEAD,
2136                 .alg.aead = {
2137                         .base = {
2138                                 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2139                                 .cra_driver_name =
2140                                         "authenc(hmac(sha1),cbc(aes))-sa2ul",
2141                                 .cra_blocksize = AES_BLOCK_SIZE,
2142                                 .cra_flags = CRYPTO_ALG_TYPE_AEAD |
2143                                         CRYPTO_ALG_KERN_DRIVER_ONLY |
2144                                         CRYPTO_ALG_ASYNC |
2145                                         CRYPTO_ALG_NEED_FALLBACK,
2146                                 .cra_ctxsize = sizeof(struct sa_tfm_ctx),
2147                                 .cra_module = THIS_MODULE,
2148                                 .cra_priority = 3000,
2149                         },
2150                         .ivsize = AES_BLOCK_SIZE,
2151                         .maxauthsize = SHA1_DIGEST_SIZE,
2152
2153                         .init = sa_cra_init_aead_sha1,
2154                         .exit = sa_exit_tfm_aead,
2155                         .setkey = sa_aead_cbc_sha1_setkey,
2156                         .setauthsize = sa_aead_setauthsize,
2157                         .encrypt = sa_aead_encrypt,
2158                         .decrypt = sa_aead_decrypt,
2159                 },
2160         },
2161         {
2162                 .type   = CRYPTO_ALG_TYPE_AEAD,
2163                 .alg.aead = {
2164                         .base = {
2165                                 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2166                                 .cra_driver_name =
2167                                         "authenc(hmac(sha256),cbc(aes))-sa2ul",
2168                                 .cra_blocksize = AES_BLOCK_SIZE,
2169                                 .cra_flags = CRYPTO_ALG_TYPE_AEAD |
2170                                         CRYPTO_ALG_KERN_DRIVER_ONLY |
2171                                         CRYPTO_ALG_ASYNC |
2172                                         CRYPTO_ALG_NEED_FALLBACK,
2173                                 .cra_ctxsize = sizeof(struct sa_tfm_ctx),
2174                                 .cra_module = THIS_MODULE,
2176                                 .cra_priority = 3000,
2177                         },
2178                         .ivsize = AES_BLOCK_SIZE,
2179                         .maxauthsize = SHA256_DIGEST_SIZE,
2180
2181                         .init = sa_cra_init_aead_sha256,
2182                         .exit = sa_exit_tfm_aead,
2183                         .setkey = sa_aead_cbc_sha256_setkey,
2184                         .setauthsize = sa_aead_setauthsize,
2185                         .encrypt = sa_aead_encrypt,
2186                         .decrypt = sa_aead_decrypt,
2187                 },
2188         },
2189 };
2190
2191 /* Register the algorithms in crypto framework */
2192 static void sa_register_algos(const struct device *dev)
2193 {
2194         char *alg_name;
2195         u32 type;
2196         int i, err;
2197
2198         for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
2199                 type = sa_algs[i].type;
2200                 if (type == CRYPTO_ALG_TYPE_SKCIPHER) {
2201                         alg_name = sa_algs[i].alg.skcipher.base.cra_name;
2202                         err = crypto_register_skcipher(&sa_algs[i].alg.skcipher);
2203                 } else if (type == CRYPTO_ALG_TYPE_AHASH) {
2204                         alg_name = sa_algs[i].alg.ahash.halg.base.cra_name;
2205                         err = crypto_register_ahash(&sa_algs[i].alg.ahash);
2206                 } else if (type == CRYPTO_ALG_TYPE_AEAD) {
2207                         alg_name = sa_algs[i].alg.aead.base.cra_name;
2208                         err = crypto_register_aead(&sa_algs[i].alg.aead);
2209                 } else {
                        dev_err(dev,
                                "unsupported crypto algorithm (%d)\n",
                                sa_algs[i].type);
2213                         continue;
2214                 }
2215
2216                 if (err)
2217                         dev_err(dev, "Failed to register '%s'\n", alg_name);
2218                 else
2219                         sa_algs[i].registered = true;
2220         }
2221 }
2222
2223 /* Unregister the algorithms in crypto framework */
2224 static void sa_unregister_algos(const struct device *dev)
2225 {
2226         u32 type;
2227         int i;
2228
2229         for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
2230                 type = sa_algs[i].type;
2231                 if (!sa_algs[i].registered)
2232                         continue;
2233                 if (type == CRYPTO_ALG_TYPE_SKCIPHER)
2234                         crypto_unregister_skcipher(&sa_algs[i].alg.skcipher);
2235                 else if (type == CRYPTO_ALG_TYPE_AHASH)
2236                         crypto_unregister_ahash(&sa_algs[i].alg.ahash);
2237                 else if (type == CRYPTO_ALG_TYPE_AEAD)
2238                         crypto_unregister_aead(&sa_algs[i].alg.aead);
2239
2240                 sa_algs[i].registered = false;
2241         }
2242 }
2243
2244 static int sa_init_mem(struct sa_crypto_data *dev_data)
2245 {
2246         struct device *dev = &dev_data->pdev->dev;
2247         /* Setup dma pool for security context buffers */
2248         dev_data->sc_pool = dma_pool_create("keystone-sc", dev,
2249                                             SA_CTX_MAX_SZ, 64, 0);
2250         if (!dev_data->sc_pool) {
                dev_err(dev, "Failed to create dma pool\n");
2252                 return -ENOMEM;
2253         }
2254
2255         return 0;
2256 }
2257
2258 static int sa_dma_init(struct sa_crypto_data *dd)
2259 {
2260         int ret;
2261         struct dma_slave_config cfg;
2262
2263         dd->dma_rx1 = NULL;
2264         dd->dma_tx = NULL;
2265         dd->dma_rx2 = NULL;
2266
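        /* The DMA engines servicing SA2UL are limited to 48-bit addressing. */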
2267         ret = dma_coerce_mask_and_coherent(dd->dev, DMA_BIT_MASK(48));
2268         if (ret)
2269                 return ret;
2270
2271         dd->dma_rx1 = dma_request_chan(dd->dev, "rx1");
2272         if (IS_ERR(dd->dma_rx1))
2273                 return dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx1),
2274                                      "Unable to request rx1 DMA channel\n");
2275
2276         dd->dma_rx2 = dma_request_chan(dd->dev, "rx2");
2277         if (IS_ERR(dd->dma_rx2)) {
2278                 dma_release_channel(dd->dma_rx1);
2279                 return dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx2),
2280                                      "Unable to request rx2 DMA channel\n");
2281         }
2282
2283         dd->dma_tx = dma_request_chan(dd->dev, "tx");
2284         if (IS_ERR(dd->dma_tx)) {
2285                 ret = dev_err_probe(dd->dev, PTR_ERR(dd->dma_tx),
2286                                     "Unable to request tx DMA channel\n");
2287                 goto err_dma_tx;
2288         }
2289
2290         memzero_explicit(&cfg, sizeof(cfg));
2291
2292         cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2293         cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2294         cfg.src_maxburst = 4;
2295         cfg.dst_maxburst = 4;
2296
        ret = dmaengine_slave_config(dd->dma_rx1, &cfg);
        if (ret) {
                dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
                        ret);
                goto err_dma_config;
        }

        ret = dmaengine_slave_config(dd->dma_rx2, &cfg);
        if (ret) {
                dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
                        ret);
                goto err_dma_config;
        }

        ret = dmaengine_slave_config(dd->dma_tx, &cfg);
        if (ret) {
                dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
                        ret);
                goto err_dma_config;
        }

        return 0;

err_dma_config:
        dma_release_channel(dd->dma_tx);
err_dma_tx:
        dma_release_channel(dd->dma_rx1);
        dma_release_channel(dd->dma_rx2);
2323
2324         return ret;
2325 }
2326
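/*
 * Make each child device populated below this node a consumer of the SA2UL
 * parent, so that probing and power management follow the device link.
 */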
2327 static int sa_link_child(struct device *dev, void *data)
2328 {
2329         struct device *parent = data;
2330
2331         device_link_add(dev, parent, DL_FLAG_AUTOPROBE_CONSUMER);
2332
2333         return 0;
2334 }
2335
2336 static int sa_ul_probe(struct platform_device *pdev)
2337 {
2338         struct device *dev = &pdev->dev;
2339         struct device_node *node = dev->of_node;
2340         struct resource *res;
        void __iomem *saul_base;
2342         struct sa_crypto_data *dev_data;
2343         u32 val;
2344         int ret;
2345
2346         dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL);
2347         if (!dev_data)
2348                 return -ENOMEM;
2349
2350         sa_k3_dev = dev;
2351         dev_data->dev = dev;
2352         dev_data->pdev = pdev;
2353         platform_set_drvdata(pdev, dev_data);
2354         dev_set_drvdata(sa_k3_dev, dev_data);
2355
2356         pm_runtime_enable(dev);
2357         ret = pm_runtime_resume_and_get(dev);
2358         if (ret < 0) {
2359                 dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__,
2360                         ret);
2361                 return ret;
2362         }
2363
        ret = sa_init_mem(dev_data);
        if (ret)
                goto disable_pm_runtime;
2365         ret = sa_dma_init(dev_data);
2366         if (ret)
2367                 goto disable_pm_runtime;
2368
2369         spin_lock_init(&dev_data->scid_lock);
2370         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        saul_base = devm_ioremap_resource(dev, res);
        if (IS_ERR(saul_base)) {
                ret = PTR_ERR(saul_base);
                goto release_dma;
        }

2373         dev_data->base = saul_base;
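        /*
         * Enable the encryption and authentication sub-systems, the context
         * cache, both CPPI ports, and the TRNG.
         */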
2374         val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN |
2375             SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN |
2376             SA_EEC_TRNG_EN;
2377
2378         writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL);
2379
2380         sa_register_algos(dev);
2381
2382         ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
2383         if (ret)
2384                 goto release_dma;
2385
2386         device_for_each_child(&pdev->dev, &pdev->dev, sa_link_child);
2387
2388         return 0;
2389
2390 release_dma:
2391         sa_unregister_algos(&pdev->dev);
2392
2393         dma_release_channel(dev_data->dma_rx2);
2394         dma_release_channel(dev_data->dma_rx1);
2395         dma_release_channel(dev_data->dma_tx);
2396
2397         dma_pool_destroy(dev_data->sc_pool);
2398
2399 disable_pm_runtime:
2400         pm_runtime_put_sync(&pdev->dev);
2401         pm_runtime_disable(&pdev->dev);
2402
2403         return ret;
2404 }
2405
2406 static int sa_ul_remove(struct platform_device *pdev)
2407 {
2408         struct sa_crypto_data *dev_data = platform_get_drvdata(pdev);
2409
2410         sa_unregister_algos(&pdev->dev);
2411
2412         dma_release_channel(dev_data->dma_rx2);
2413         dma_release_channel(dev_data->dma_rx1);
2414         dma_release_channel(dev_data->dma_tx);
2415
2416         dma_pool_destroy(dev_data->sc_pool);
2417
2418         platform_set_drvdata(pdev, NULL);
2419
2420         pm_runtime_put_sync(&pdev->dev);
2421         pm_runtime_disable(&pdev->dev);
2422
2423         return 0;
2424 }
2425
2426 static const struct of_device_id of_match[] = {
2427         {.compatible = "ti,j721e-sa2ul",},
2428         {.compatible = "ti,am654-sa2ul",},
2429         {},
2430 };
2431 MODULE_DEVICE_TABLE(of, of_match);
2432
2433 static struct platform_driver sa_ul_driver = {
2434         .probe = sa_ul_probe,
2435         .remove = sa_ul_remove,
2436         .driver = {
2437                    .name = "saul-crypto",
2438                    .of_match_table = of_match,
2439                    },
2440 };
2441 module_platform_driver(sa_ul_driver);
2442 MODULE_LICENSE("GPL v2");
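MODULE_DESCRIPTION("K3 SA2UL crypto accelerator driver");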