// SPDX-License-Identifier: GPL-2.0
/*
 * K3 SA2UL crypto accelerator driver
 *
 * Copyright (C) 2018-2020 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors:     Keerthy
 *              Vitaly Andrianov
 *              Tero Kristo
 */
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include <crypto/aes.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>

#include "sa2ul.h"

/* Byte offset for key in encryption security context */
#define SC_ENC_KEY_OFFSET (1 + 27 + 4)
/* Byte offset for Aux-1 in encryption security context */
#define SC_ENC_AUX1_OFFSET (1 + 27 + 4 + 32)

#define SA_CMDL_UPD_ENC         0x0001
#define SA_CMDL_UPD_AUTH        0x0002
#define SA_CMDL_UPD_ENC_IV      0x0004
#define SA_CMDL_UPD_AUTH_IV     0x0008
#define SA_CMDL_UPD_AUX_KEY     0x0010

#define SA_AUTH_SUBKEY_LEN      16
#define SA_CMDL_PAYLOAD_LENGTH_MASK     0xFFFF
#define SA_CMDL_SOP_BYPASS_LEN_MASK     0xFF000000

#define MODE_CONTROL_BYTES      27
#define SA_HASH_PROCESSING      0
#define SA_CRYPTO_PROCESSING    0
#define SA_UPLOAD_HASH_TO_TLR   BIT(6)

#define SA_SW0_FLAGS_MASK       0xF0000
#define SA_SW0_CMDL_INFO_MASK   0x1F00000
#define SA_SW0_CMDL_PRESENT     BIT(4)
#define SA_SW0_ENG_ID_MASK      0x3E000000
#define SA_SW0_DEST_INFO_PRESENT        BIT(30)
#define SA_SW2_EGRESS_LENGTH            0xFF000000
#define SA_BASIC_HASH           0x10

#define SHA256_DIGEST_WORDS    8
/* Make 32-bit word from 4 bytes */
#define SA_MK_U32(b0, b1, b2, b3) (((b0) << 24) | ((b1) << 16) | \
                                   ((b2) << 8) | (b3))
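/*
 * Illustrative example (comment only): SA_MK_U32 packs four bytes into a
 * 32-bit word, most significant byte first, e.g.
 *
 *      SA_MK_U32(0x01, 0x02, 0x03, 0x04) == 0x01020304
 */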

/* size of SCCTL structure in bytes */
#define SA_SCCTL_SZ 16

/* Max Authentication tag size */
#define SA_MAX_AUTH_TAG_SZ 64

#define PRIV_ID 0x1
#define PRIV    0x1

static struct device *sa_k3_dev;

/**
 * struct sa_cmdl_cfg - Command label configuration descriptor
 * @aalg: authentication algorithm ID
 * @enc_eng_id: Encryption Engine ID supported by the SA hardware
 * @auth_eng_id: Authentication Engine ID
 * @iv_size: Initialization Vector size
 * @akey: Authentication key
 * @akey_len: Authentication key length
 * @enc: True, if this is an encode request
 */
struct sa_cmdl_cfg {
        int aalg;
        u8 enc_eng_id;
        u8 auth_eng_id;
        u8 iv_size;
        const u8 *akey;
        u16 akey_len;
        bool enc;
};

/**
 * struct algo_data - Crypto algorithm specific data
 * @enc_eng: Encryption engine info structure
 * @auth_eng: Authentication engine info structure
 * @auth_ctrl: Authentication control word
 * @hash_size: Size of digest
 * @iv_idx: iv index in psdata
 * @iv_out_size: iv out size
 * @ealg_id: Encryption Algorithm ID
 * @aalg_id: Authentication algorithm ID
 * @mci_enc: Mode Control Instruction for Encryption algorithm
 * @mci_dec: Mode Control Instruction for Decryption
 * @inv_key: Whether the encryption algorithm demands key inversion
 * @ctx: Pointer to the algorithm context
 * @keyed_mac: Whether the authentication algorithm has key
 * @prep_iopad: Function pointer to generate intermediate ipad/opad
 */
struct algo_data {
        struct sa_eng_info enc_eng;
        struct sa_eng_info auth_eng;
        u8 auth_ctrl;
        u8 hash_size;
        u8 iv_idx;
        u8 iv_out_size;
        u8 ealg_id;
        u8 aalg_id;
        u8 *mci_enc;
        u8 *mci_dec;
        bool inv_key;
        struct sa_tfm_ctx *ctx;
        bool keyed_mac;
        void (*prep_iopad)(struct algo_data *algo, const u8 *key,
                           u16 key_sz, __be32 *ipad, __be32 *opad);
};

/**
 * struct sa_alg_tmpl: A generic template encompassing crypto/aead algorithms
 * @type: Type of the crypto algorithm.
 * @alg: Union of crypto algorithm definitions.
 * @registered: Flag indicating if the crypto algorithm is already registered
 */
struct sa_alg_tmpl {
        u32 type;               /* CRYPTO_ALG_TYPE from <linux/crypto.h> */
        union {
                struct skcipher_alg skcipher;
                struct ahash_alg ahash;
                struct aead_alg aead;
        } alg;
        bool registered;
};

/**
 * struct sa_mapped_sg: scatterlist information for tx and rx
 * @mapped: Set to true if the @sgt is mapped
 * @dir: mapping direction used for @sgt
 * @split_sg: Set if the sg is split and needs to be freed up
 * @static_sg: Static scatterlist entry for overriding data
 * @sgt: scatterlist table for DMA API use
 */
struct sa_mapped_sg {
        bool mapped;
        enum dma_data_direction dir;
        struct scatterlist static_sg;
        struct scatterlist *split_sg;
        struct sg_table sgt;
};

/**
 * struct sa_rx_data: RX Packet miscellaneous data placeholder
 * @req: crypto request data pointer
 * @ddev: pointer to the DMA device
 * @tx_in: dma_async_tx_descriptor pointer for rx channel
 * @mapped_sg: Information on tx (0) and rx (1) scatterlist DMA mapping
 * @enc: Flag indicating either encryption or decryption
 * @enc_iv_size: Initialisation vector size
 * @iv_idx: Initialisation vector index
 */
struct sa_rx_data {
        void *req;
        struct device *ddev;
        struct dma_async_tx_descriptor *tx_in;
        struct sa_mapped_sg mapped_sg[2];
        u8 enc;
        u8 enc_iv_size;
        u8 iv_idx;
};

/**
 * struct sa_req: SA request definition
 * @dev: device for the request
 * @size: total data to be transmitted via DMA
 * @enc_offset: offset of cipher data
 * @enc_size: data to be passed to cipher engine
 * @enc_iv: cipher IV
 * @auth_offset: offset of the authentication data
 * @auth_size: size of the authentication data
 * @auth_iv: authentication IV
 * @type: algorithm type for the request
 * @cmdl: command label pointer
 * @base: pointer to the base request
 * @ctx: pointer to the algorithm context data
 * @enc: true if this is an encode request
 * @src: source data
 * @dst: destination data
 * @callback: DMA callback for the request
 * @mdata_size: metadata size passed to DMA
 */
struct sa_req {
        struct device *dev;
        u16 size;
        u8 enc_offset;
        u16 enc_size;
        u8 *enc_iv;
        u8 auth_offset;
        u16 auth_size;
        u8 *auth_iv;
        u32 type;
        u32 *cmdl;
        struct crypto_async_request *base;
        struct sa_tfm_ctx *ctx;
        bool enc;
        struct scatterlist *src;
        struct scatterlist *dst;
        dma_async_tx_callback callback;
        u16 mdata_size;
};

/*
 * Mode Control Instructions for various Key lengths 128, 192, 256
 * For CBC (Cipher Block Chaining) mode for encryption
 */
static u8 mci_cbc_enc_array[3][MODE_CONTROL_BYTES] = {
        {       0x61, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x61, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x61, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
};

/*
 * Mode Control Instructions for various Key lengths 128, 192, 256
 * For CBC (Cipher Block Chaining) mode for decryption
 */
static u8 mci_cbc_dec_array[3][MODE_CONTROL_BYTES] = {
        {       0x71, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x71, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x71, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
};

/*
 * Mode Control Instructions for various Key lengths 128, 192, 256
 * For CBC (Cipher Block Chaining) mode for encryption, no-IV variant
 */
static u8 mci_cbc_enc_no_iv_array[3][MODE_CONTROL_BYTES] = {
        {       0x21, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x21, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x21, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
};

/*
 * Mode Control Instructions for various Key lengths 128, 192, 256
 * For CBC (Cipher Block Chaining) mode for decryption, no-IV variant
 */
static u8 mci_cbc_dec_no_iv_array[3][MODE_CONTROL_BYTES] = {
        {       0x31, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x31, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x31, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
};

/*
 * Mode Control Instructions for various Key lengths 128, 192, 256
 * For ECB (Electronic Code Book) mode for encryption
 */
static u8 mci_ecb_enc_array[3][MODE_CONTROL_BYTES] = {
        {       0x21, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x21, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x21, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
};

/*
 * Mode Control Instructions for various Key lengths 128, 192, 256
 * For ECB (Electronic Code Book) mode for decryption
 */
static u8 mci_ecb_dec_array[3][MODE_CONTROL_BYTES] = {
        {       0x31, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x31, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x31, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
};

/*
 * Mode Control Instructions for the 3DES algorithm,
 * for CBC (Cipher Block Chaining) and ECB modes,
 * encryption and decryption respectively
 */
static u8 mci_cbc_3des_enc_array[MODE_CONTROL_BYTES] = {
        0x60, 0x00, 0x00, 0x18, 0x88, 0x52, 0xaa, 0x4b, 0x7e, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00,
};

static u8 mci_cbc_3des_dec_array[MODE_CONTROL_BYTES] = {
        0x70, 0x00, 0x00, 0x85, 0x0a, 0xca, 0x98, 0xf4, 0x40, 0xc0, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00,
};

static u8 mci_ecb_3des_enc_array[MODE_CONTROL_BYTES] = {
        0x20, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00,
};

static u8 mci_ecb_3des_dec_array[MODE_CONTROL_BYTES] = {
        0x30, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00,
};

/*
 * Perform 16-byte (128-bit) swizzling.
 * The SA2UL expects the security context in little endian; since the bus
 * width is 128 bits (16 bytes), swap 16 bytes at a time, from higher to
 * lower address.
 */
static void sa_swiz_128(u8 *in, u16 len)
{
        u8 data[16];
        int i, j;

        for (i = 0; i < len; i += 16) {
                memcpy(data, &in[i], 16);
                for (j = 0; j < 16; j++)
                        in[i + j] = data[15 - j];
        }
}
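
/*
 * Illustrative example (comment only): for each 16-byte group,
 * sa_swiz_128() reverses the byte order in place:
 *
 *      in:  00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f
 *      out: 0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 */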

/* Prepare the ipad and opad from the key, as per HMAC (RFC 2104) step 1 */
static void prepare_kipad(u8 *k_ipad, const u8 *key, u16 key_sz)
{
        int i;

        for (i = 0; i < key_sz; i++)
                k_ipad[i] = key[i] ^ 0x36;

        /* Instead of XOR with 0 */
        for (; i < SHA1_BLOCK_SIZE; i++)
                k_ipad[i] = 0x36;
}

static void prepare_kopad(u8 *k_opad, const u8 *key, u16 key_sz)
{
        int i;

        for (i = 0; i < key_sz; i++)
                k_opad[i] = key[i] ^ 0x5c;

        /* Instead of XOR with 0 */
        for (; i < SHA1_BLOCK_SIZE; i++)
                k_opad[i] = 0x5c;
}
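
/*
 * Illustrative note, assuming the standard HMAC construction (RFC 2104):
 * for a key K shorter than the 64-byte block, the helpers above produce
 *
 *      k_ipad = (K ^ 0x36,0x36,...) padded out with 0x36
 *      k_opad = (K ^ 0x5c,0x5c,...) padded out with 0x5c
 *
 * e.g. key byte 0xab becomes 0xab ^ 0x36 = 0x9d in k_ipad and
 * 0xab ^ 0x5c = 0xf7 in k_opad.
 */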

static void sa_export_shash(void *state, struct shash_desc *hash,
                            int digest_size, __be32 *out)
{
        struct sha1_state *sha1;
        struct sha256_state *sha256;
        u32 *result;

        switch (digest_size) {
        case SHA1_DIGEST_SIZE:
                sha1 = state;
                result = sha1->state;
                break;
        case SHA256_DIGEST_SIZE:
                sha256 = state;
                result = sha256->state;
                break;
        default:
                dev_err(sa_k3_dev, "%s: bad digest_size=%d\n", __func__,
                        digest_size);
                return;
        }

        crypto_shash_export(hash, state);

        cpu_to_be32_array(out, result, digest_size / 4);
}

static void sa_prepare_iopads(struct algo_data *data, const u8 *key,
                              u16 key_sz, __be32 *ipad, __be32 *opad)
{
        SHASH_DESC_ON_STACK(shash, data->ctx->shash);
        int block_size = crypto_shash_blocksize(data->ctx->shash);
        int digest_size = crypto_shash_digestsize(data->ctx->shash);
        union {
                struct sha1_state sha1;
                struct sha256_state sha256;
                u8 k_pad[SHA1_BLOCK_SIZE];
        } sha;

        shash->tfm = data->ctx->shash;

        prepare_kipad(sha.k_pad, key, key_sz);

        crypto_shash_init(shash);
        crypto_shash_update(shash, sha.k_pad, block_size);
        sa_export_shash(&sha, shash, digest_size, ipad);

        prepare_kopad(sha.k_pad, key, key_sz);

        crypto_shash_init(shash);
        crypto_shash_update(shash, sha.k_pad, block_size);

        sa_export_shash(&sha, shash, digest_size, opad);

        memzero_explicit(&sha, sizeof(sha));
}
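
/*
 * Illustrative note, assuming the standard HMAC construction: the values
 * exported above are the partial hash states after compressing one block
 * of K^ipad and K^opad respectively, i.e. the precomputed inner and outer
 * states of HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)), so the
 * engine can resume from these states instead of rehashing the key.
 */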

/* Derive the inverse key used in AES-CBC decryption operation */
static inline int sa_aes_inv_key(u8 *inv_key, const u8 *key, u16 key_sz)
{
        struct crypto_aes_ctx ctx;
        int key_pos;

        if (aes_expandkey(&ctx, key, key_sz)) {
                dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
                return -EINVAL;
        }

        /* Workaround to get the right inverse for AES_KEYSIZE_192 size keys */
        if (key_sz == AES_KEYSIZE_192) {
                ctx.key_enc[52] = ctx.key_enc[51] ^ ctx.key_enc[46];
                ctx.key_enc[53] = ctx.key_enc[52] ^ ctx.key_enc[47];
        }

        /* Based on the crypto_aes_expand_key logic */
        switch (key_sz) {
        case AES_KEYSIZE_128:
        case AES_KEYSIZE_192:
                key_pos = key_sz + 24;
                break;

        case AES_KEYSIZE_256:
                key_pos = key_sz + 24 - 4;
                break;

        default:
                dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
                return -EINVAL;
        }

        memcpy(inv_key, &ctx.key_enc[key_pos], key_sz);
        return 0;
}
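
/*
 * Illustrative example, derived from the code above: key_pos indexes the
 * u32 round-key schedule ctx.key_enc[], so the bytes copied out are the
 * final round keys the engine needs for decryption:
 *
 *      AES-128: key_pos = 16 + 24 = 40,     words 40..43
 *      AES-192: key_pos = 24 + 24 = 48,     words 48..53
 *      AES-256: key_pos = 32 + 24 - 4 = 52, words 52..59
 */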

/* Set Security context for the encryption engine */
static int sa_set_sc_enc(struct algo_data *ad, const u8 *key, u16 key_sz,
                         u8 enc, u8 *sc_buf)
{
        const u8 *mci = NULL;

        /* Set Encryption mode selector to crypto processing */
        sc_buf[0] = SA_CRYPTO_PROCESSING;

        if (enc)
                mci = ad->mci_enc;
        else
                mci = ad->mci_dec;
        /* Set the mode control instructions in security context */
        if (mci)
                memcpy(&sc_buf[1], mci, MODE_CONTROL_BYTES);

        /* For AES-CBC decryption get the inverse key */
        if (ad->inv_key && !enc) {
                if (sa_aes_inv_key(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz))
                        return -EINVAL;
        /* For all other cases: key is used */
        } else {
                memcpy(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz);
        }

        return 0;
}

/* Set Security context for the authentication engine */
static void sa_set_sc_auth(struct algo_data *ad, const u8 *key, u16 key_sz,
                           u8 *sc_buf)
{
        __be32 *ipad = (void *)(sc_buf + 32);
        __be32 *opad = (void *)(sc_buf + 64);

        /* Set Authentication mode selector to hash processing */
        sc_buf[0] = SA_HASH_PROCESSING;
        /* Auth SW ctrl word: bit[6]=1 (upload computed hash to TLR section) */
        sc_buf[1] = SA_UPLOAD_HASH_TO_TLR;
        sc_buf[1] |= ad->auth_ctrl;

        /* Copy the keys or ipad/opad */
        if (ad->keyed_mac)
                ad->prep_iopad(ad, key, key_sz, ipad, opad);
        else {
                /* basic hash */
                sc_buf[1] |= SA_BASIC_HASH;
        }
}

static inline void sa_copy_iv(__be32 *out, const u8 *iv, bool size16)
{
        int j;

        for (j = 0; j < ((size16) ? 4 : 2); j++) {
                *out = cpu_to_be32(*((u32 *)iv));
                iv += 4;
                out++;
        }
}

/* Format general command label */
static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl,
                              struct sa_cmdl_upd_info *upd_info)
{
        u8 enc_offset = 0, auth_offset = 0, total = 0;
        u8 enc_next_eng = SA_ENG_ID_OUTPORT2;
        u8 auth_next_eng = SA_ENG_ID_OUTPORT2;
        u32 *word_ptr = (u32 *)cmdl;
        int i;

        /* Clear the command label */
        memzero_explicit(cmdl, (SA_MAX_CMDL_WORDS * sizeof(u32)));

        /* Initialize the command update structure */
        memzero_explicit(upd_info, sizeof(*upd_info));

        if (cfg->enc_eng_id && cfg->auth_eng_id) {
                if (cfg->enc) {
                        auth_offset = SA_CMDL_HEADER_SIZE_BYTES;
                        enc_next_eng = cfg->auth_eng_id;

                        if (cfg->iv_size)
                                auth_offset += cfg->iv_size;
                } else {
                        enc_offset = SA_CMDL_HEADER_SIZE_BYTES;
                        auth_next_eng = cfg->enc_eng_id;
                }
        }

        if (cfg->enc_eng_id) {
                upd_info->flags |= SA_CMDL_UPD_ENC;
                upd_info->enc_size.index = enc_offset >> 2;
                upd_info->enc_offset.index = upd_info->enc_size.index + 1;
                /* Encryption command label */
                cmdl[enc_offset + SA_CMDL_OFFSET_NESC] = enc_next_eng;

                /* Encryption modes requiring IV */
                if (cfg->iv_size) {
                        upd_info->flags |= SA_CMDL_UPD_ENC_IV;
                        upd_info->enc_iv.index =
                                (enc_offset + SA_CMDL_HEADER_SIZE_BYTES) >> 2;
                        upd_info->enc_iv.size = cfg->iv_size;

                        cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
                                SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;

                        cmdl[enc_offset + SA_CMDL_OFFSET_OPTION_CTRL1] =
                                (SA_CTX_ENC_AUX2_OFFSET | (cfg->iv_size >> 3));
                        total += SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
                } else {
                        cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
                                                SA_CMDL_HEADER_SIZE_BYTES;
                        total += SA_CMDL_HEADER_SIZE_BYTES;
                }
        }

        if (cfg->auth_eng_id) {
                upd_info->flags |= SA_CMDL_UPD_AUTH;
                upd_info->auth_size.index = auth_offset >> 2;
                upd_info->auth_offset.index = upd_info->auth_size.index + 1;
                cmdl[auth_offset + SA_CMDL_OFFSET_NESC] = auth_next_eng;
                cmdl[auth_offset + SA_CMDL_OFFSET_LABEL_LEN] =
                        SA_CMDL_HEADER_SIZE_BYTES;
                total += SA_CMDL_HEADER_SIZE_BYTES;
        }

        total = roundup(total, 8);

        for (i = 0; i < total / 4; i++)
                word_ptr[i] = swab32(word_ptr[i]);

        return total;
}
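
/*
 * Illustrative sketch (hypothetical configuration): for AES-CBC with a
 * 16-byte IV and no authentication engine, the code above emits a single
 * encryption label at offset 0:
 *
 *      cmdl[SA_CMDL_OFFSET_NESC]         = SA_ENG_ID_OUTPORT2
 *      cmdl[SA_CMDL_OFFSET_LABEL_LEN]    = SA_CMDL_HEADER_SIZE_BYTES + 16
 *      cmdl[SA_CMDL_OFFSET_OPTION_CTRL1] = SA_CTX_ENC_AUX2_OFFSET | (16 >> 3)
 *
 * with the IV following the header (patched per request by
 * sa_update_cmdl()), and total rounded up to a multiple of 8 before the
 * final word swap for the hardware.
 */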

/* Update Command label */
static inline void sa_update_cmdl(struct sa_req *req, u32 *cmdl,
                                  struct sa_cmdl_upd_info *upd_info)
{
        int i = 0, j;

        if (likely(upd_info->flags & SA_CMDL_UPD_ENC)) {
                cmdl[upd_info->enc_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
                cmdl[upd_info->enc_size.index] |= req->enc_size;
                cmdl[upd_info->enc_offset.index] &=
                                                ~SA_CMDL_SOP_BYPASS_LEN_MASK;
                cmdl[upd_info->enc_offset.index] |=
                        ((u32)req->enc_offset <<
                         __ffs(SA_CMDL_SOP_BYPASS_LEN_MASK));

                if (likely(upd_info->flags & SA_CMDL_UPD_ENC_IV)) {
                        __be32 *data = (__be32 *)&cmdl[upd_info->enc_iv.index];
                        u32 *enc_iv = (u32 *)req->enc_iv;

                        for (j = 0; i < upd_info->enc_iv.size; i += 4, j++) {
                                data[j] = cpu_to_be32(*enc_iv);
                                enc_iv++;
                        }
                }
        }

        if (likely(upd_info->flags & SA_CMDL_UPD_AUTH)) {
                cmdl[upd_info->auth_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
                cmdl[upd_info->auth_size.index] |= req->auth_size;
                cmdl[upd_info->auth_offset.index] &=
                        ~SA_CMDL_SOP_BYPASS_LEN_MASK;
                cmdl[upd_info->auth_offset.index] |=
                        ((u32)req->auth_offset <<
                         __ffs(SA_CMDL_SOP_BYPASS_LEN_MASK));
                if (upd_info->flags & SA_CMDL_UPD_AUTH_IV) {
                        sa_copy_iv((void *)&cmdl[upd_info->auth_iv.index],
                                   req->auth_iv,
                                   (upd_info->auth_iv.size > 8));
                }
                if (upd_info->flags & SA_CMDL_UPD_AUX_KEY) {
                        int offset = (req->auth_size & 0xF) ? 4 : 0;

                        memcpy(&cmdl[upd_info->aux_key_info.index],
                               &upd_info->aux_key[offset], 16);
                }
        }
}

/* Format SWINFO words to be sent to SA */
static
void sa_set_swinfo(u8 eng_id, u16 sc_id, dma_addr_t sc_phys,
                   u8 cmdl_present, u8 cmdl_offset, u8 flags,
                   u8 hash_size, u32 *swinfo)
{
        swinfo[0] = sc_id;
        swinfo[0] |= (flags << __ffs(SA_SW0_FLAGS_MASK));
        if (likely(cmdl_present))
                swinfo[0] |= ((cmdl_offset | SA_SW0_CMDL_PRESENT) <<
                                                __ffs(SA_SW0_CMDL_INFO_MASK));
        swinfo[0] |= (eng_id << __ffs(SA_SW0_ENG_ID_MASK));

        swinfo[0] |= SA_SW0_DEST_INFO_PRESENT;
        swinfo[1] = (u32)(sc_phys & 0xFFFFFFFFULL);
        swinfo[2] = (u32)((sc_phys & 0xFFFFFFFF00000000ULL) >> 32);
        swinfo[2] |= (hash_size << __ffs(SA_SW2_EGRESS_LENGTH));
}
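
/*
 * Illustrative sketch of the resulting SWINFO layout, following the
 * SA_SW*_* masks used above:
 *
 *      swinfo[0]: sc_id | flags | command label info | engine id | DEST_INFO
 *      swinfo[1]: lower 32 bits of the security context DMA address
 *      swinfo[2]: upper 32 bits of the address | egress (hash) length
 */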

/* Dump the security context */
static void sa_dump_sc(u8 *buf, dma_addr_t dma_addr)
{
#ifdef DEBUG
        dev_info(sa_k3_dev, "Security context dump:: 0x%pad\n", &dma_addr);
        print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
                       16, 1, buf, SA_CTX_MAX_SZ, false);
#endif
}

static
int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key,
               u16 enc_key_sz, const u8 *auth_key, u16 auth_key_sz,
               struct algo_data *ad, u8 enc, u32 *swinfo)
{
        int enc_sc_offset = 0;
        int auth_sc_offset = 0;
        u8 *sc_buf = ctx->sc;
        u16 sc_id = ctx->sc_id;
        u8 first_engine = 0;

        memzero_explicit(sc_buf, SA_CTX_MAX_SZ);

        if (ad->auth_eng.eng_id) {
                if (enc)
                        first_engine = ad->enc_eng.eng_id;
                else
                        first_engine = ad->auth_eng.eng_id;

                enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
                auth_sc_offset = enc_sc_offset + ad->enc_eng.sc_size;
                sc_buf[1] = SA_SCCTL_FE_AUTH_ENC;
                if (!ad->hash_size)
                        return -EINVAL;
                ad->hash_size = roundup(ad->hash_size, 8);

        } else if (ad->enc_eng.eng_id && !ad->auth_eng.eng_id) {
                enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
                first_engine = ad->enc_eng.eng_id;
                sc_buf[1] = SA_SCCTL_FE_ENC;
                ad->hash_size = ad->iv_out_size;
        }

        /* SCCTL Owner info: 0=host, 1=CP_ACE */
        sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0;
        memcpy(&sc_buf[2], &sc_id, 2);
        sc_buf[4] = 0x0;
        sc_buf[5] = PRIV_ID;
        sc_buf[6] = PRIV;
        sc_buf[7] = 0x0;

        /* Prepare context for encryption engine */
        if (ad->enc_eng.sc_size) {
                if (sa_set_sc_enc(ad, enc_key, enc_key_sz, enc,
                                  &sc_buf[enc_sc_offset]))
                        return -EINVAL;
        }

        /* Prepare context for authentication engine */
        if (ad->auth_eng.sc_size)
                sa_set_sc_auth(ad, auth_key, auth_key_sz,
                               &sc_buf[auth_sc_offset]);

        /* Set the ownership of context to CP_ACE */
        sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0x80;

        /* swizzle the security context */
        sa_swiz_128(sc_buf, SA_CTX_MAX_SZ);

        sa_set_swinfo(first_engine, ctx->sc_id, ctx->sc_phys, 1, 0,
                      SA_SW_INFO_FLAG_EVICT, ad->hash_size, swinfo);

        sa_dump_sc(sc_buf, ctx->sc_phys);

        return 0;
}

/* Free the per direction context memory */
static void sa_free_ctx_info(struct sa_ctx_info *ctx,
                             struct sa_crypto_data *data)
{
        unsigned long bn;

        bn = ctx->sc_id - data->sc_id_start;
        spin_lock(&data->scid_lock);
        __clear_bit(bn, data->ctx_bm);
        data->sc_id--;
        spin_unlock(&data->scid_lock);

        if (ctx->sc) {
                dma_pool_free(data->sc_pool, ctx->sc, ctx->sc_phys);
                ctx->sc = NULL;
        }
}

static int sa_init_ctx_info(struct sa_ctx_info *ctx,
                            struct sa_crypto_data *data)
{
        unsigned long bn;
        int err;

        spin_lock(&data->scid_lock);
        bn = find_first_zero_bit(data->ctx_bm, SA_MAX_NUM_CTX);
        __set_bit(bn, data->ctx_bm);
        data->sc_id++;
        spin_unlock(&data->scid_lock);

        ctx->sc_id = (u16)(data->sc_id_start + bn);

        ctx->sc = dma_pool_alloc(data->sc_pool, GFP_KERNEL, &ctx->sc_phys);
        if (!ctx->sc) {
                dev_err(&data->pdev->dev, "Failed to allocate SC memory\n");
                err = -ENOMEM;
                goto scid_rollback;
        }

        return 0;

scid_rollback:
        spin_lock(&data->scid_lock);
        __clear_bit(bn, data->ctx_bm);
        data->sc_id--;
        spin_unlock(&data->scid_lock);

        return err;
}

static void sa_cipher_cra_exit(struct crypto_skcipher *tfm)
{
        struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);

        dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
                __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
                ctx->dec.sc_id, &ctx->dec.sc_phys);

        sa_free_ctx_info(&ctx->enc, data);
        sa_free_ctx_info(&ctx->dec, data);

        crypto_free_skcipher(ctx->fallback.skcipher);
}

static int sa_cipher_cra_init(struct crypto_skcipher *tfm)
{
        struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
        const char *name = crypto_tfm_alg_name(&tfm->base);
        struct crypto_skcipher *child;
        int ret;

        memzero_explicit(ctx, sizeof(*ctx));
        ctx->dev_data = data;

        ret = sa_init_ctx_info(&ctx->enc, data);
        if (ret)
                return ret;
        ret = sa_init_ctx_info(&ctx->dec, data);
        if (ret) {
                sa_free_ctx_info(&ctx->enc, data);
                return ret;
        }

        child = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);

        if (IS_ERR(child)) {
                dev_err(sa_k3_dev, "Error allocating fallback algo %s\n", name);
                return PTR_ERR(child);
        }

        ctx->fallback.skcipher = child;
        crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
                                         sizeof(struct skcipher_request));

        dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
                __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
                ctx->dec.sc_id, &ctx->dec.sc_phys);
        return 0;
}

static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
                            unsigned int keylen, struct algo_data *ad)
{
        struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_skcipher *child = ctx->fallback.skcipher;
        int cmdl_len;
        struct sa_cmdl_cfg cfg;
        int ret;

        if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
            keylen != AES_KEYSIZE_256)
                return -EINVAL;

        ad->enc_eng.eng_id = SA_ENG_ID_EM1;
        ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;

        memzero_explicit(&cfg, sizeof(cfg));
        cfg.enc_eng_id = ad->enc_eng.eng_id;
        cfg.iv_size = crypto_skcipher_ivsize(tfm);

        crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(child, tfm->base.crt_flags &
                                         CRYPTO_TFM_REQ_MASK);
        ret = crypto_skcipher_setkey(child, key, keylen);
        if (ret)
                return ret;

        /* Setup Encryption Security Context & Command label template */
        if (sa_init_sc(&ctx->enc, key, keylen, NULL, 0, ad, 1,
                       &ctx->enc.epib[1]))
                goto badkey;

        cmdl_len = sa_format_cmdl_gen(&cfg,
                                      (u8 *)ctx->enc.cmdl,
                                      &ctx->enc.cmdl_upd_info);
        if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
                goto badkey;

        ctx->enc.cmdl_size = cmdl_len;

        /* Setup Decryption Security Context & Command label template */
        if (sa_init_sc(&ctx->dec, key, keylen, NULL, 0, ad, 0,
                       &ctx->dec.epib[1]))
                goto badkey;

        cfg.enc_eng_id = ad->enc_eng.eng_id;
        cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
                                      &ctx->dec.cmdl_upd_info);

        if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
                goto badkey;

        ctx->dec.cmdl_size = cmdl_len;
        ctx->iv_idx = ad->iv_idx;

        return 0;

badkey:
        dev_err(sa_k3_dev, "%s: badkey\n", __func__);
        return -EINVAL;
}

static int sa_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
                             unsigned int keylen)
{
        struct algo_data ad = { 0 };
        /* Convert the key size (16/24/32) to the key size index (0/1/2) */
        int key_idx = (keylen >> 3) - 2;

        if (key_idx >= 3)
                return -EINVAL;

        ad.mci_enc = mci_cbc_enc_array[key_idx];
        ad.mci_dec = mci_cbc_dec_array[key_idx];
        ad.inv_key = true;
        ad.ealg_id = SA_EALG_ID_AES_CBC;
        ad.iv_idx = 4;
        ad.iv_out_size = 16;

        return sa_cipher_setkey(tfm, key, keylen, &ad);
}

static int sa_aes_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
                             unsigned int keylen)
{
        struct algo_data ad = { 0 };
        /* Convert the key size (16/24/32) to the key size index (0/1/2) */
        int key_idx = (keylen >> 3) - 2;

        if (key_idx >= 3)
                return -EINVAL;

        ad.mci_enc = mci_ecb_enc_array[key_idx];
        ad.mci_dec = mci_ecb_dec_array[key_idx];
        ad.inv_key = true;
        ad.ealg_id = SA_EALG_ID_AES_ECB;

        return sa_cipher_setkey(tfm, key, keylen, &ad);
}

static int sa_3des_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
                              unsigned int keylen)
{
        struct algo_data ad = { 0 };

        ad.mci_enc = mci_cbc_3des_enc_array;
        ad.mci_dec = mci_cbc_3des_dec_array;
        ad.ealg_id = SA_EALG_ID_3DES_CBC;
        ad.iv_idx = 6;
        ad.iv_out_size = 8;

        return sa_cipher_setkey(tfm, key, keylen, &ad);
}

static int sa_3des_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
                              unsigned int keylen)
{
        struct algo_data ad = { 0 };

        ad.mci_enc = mci_ecb_3des_enc_array;
        ad.mci_dec = mci_ecb_3des_dec_array;

        return sa_cipher_setkey(tfm, key, keylen, &ad);
}

static void sa_sync_from_device(struct sa_rx_data *rxd)
{
        struct sg_table *sgt;

        if (rxd->mapped_sg[0].dir == DMA_BIDIRECTIONAL)
                sgt = &rxd->mapped_sg[0].sgt;
        else
                sgt = &rxd->mapped_sg[1].sgt;

        dma_sync_sgtable_for_cpu(rxd->ddev, sgt, DMA_FROM_DEVICE);
}

static void sa_free_sa_rx_data(struct sa_rx_data *rxd)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(rxd->mapped_sg); i++) {
                struct sa_mapped_sg *mapped_sg = &rxd->mapped_sg[i];

                if (mapped_sg->mapped) {
                        dma_unmap_sgtable(rxd->ddev, &mapped_sg->sgt,
                                          mapped_sg->dir, 0);
                        kfree(mapped_sg->split_sg);
                }
        }

        kfree(rxd);
}

static void sa_aes_dma_in_callback(void *data)
{
        struct sa_rx_data *rxd = (struct sa_rx_data *)data;
        struct skcipher_request *req;
        u32 *result;
        __be32 *mdptr;
        size_t ml, pl;
        int i;

        sa_sync_from_device(rxd);
        req = container_of(rxd->req, struct skcipher_request, base);

        if (req->iv) {
                mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl,
                                                                  &ml);
                result = (u32 *)req->iv;

                for (i = 0; i < (rxd->enc_iv_size / 4); i++)
                        result[i] = be32_to_cpu(mdptr[i + rxd->iv_idx]);
        }

        sa_free_sa_rx_data(rxd);

        skcipher_request_complete(req, 0);
}

static void
sa_prepare_tx_desc(u32 *mdptr, u32 pslen, u32 *psdata, u32 epiblen, u32 *epib)
{
        u32 *out, *in;
        int i;

        for (out = mdptr, in = epib, i = 0; i < epiblen / sizeof(u32); i++)
                *out++ = *in++;

        mdptr[4] = (0xFFFF << 16);
        for (out = &mdptr[5], in = psdata, i = 0;
             i < pslen / sizeof(u32); i++)
                *out++ = *in++;
}
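
/*
 * Illustrative sketch of the metadata block built above (word indices as
 * used by the code; sa_run() passes a 4-word EPIB):
 *
 *      mdptr[0..3]: EPIB words copied from epib[]
 *      mdptr[4]:    0xFFFF0000 marker word
 *      mdptr[5..]:  PS data, i.e. the command label words
 */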

static int sa_run(struct sa_req *req)
{
        struct sa_rx_data *rxd;
        gfp_t gfp_flags;
        u32 cmdl[SA_MAX_CMDL_WORDS];
        struct sa_crypto_data *pdata = dev_get_drvdata(sa_k3_dev);
        struct device *ddev;
        struct dma_chan *dma_rx;
        int sg_nents, src_nents, dst_nents;
        struct scatterlist *src, *dst;
        size_t pl, ml, split_size;
        struct sa_ctx_info *sa_ctx = req->enc ? &req->ctx->enc : &req->ctx->dec;
        int ret;
        struct dma_async_tx_descriptor *tx_out;
        u32 *mdptr;
        bool diff_dst;
        enum dma_data_direction dir_src;
        struct sa_mapped_sg *mapped_sg;

        gfp_flags = req->base->flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                GFP_KERNEL : GFP_ATOMIC;

        rxd = kzalloc(sizeof(*rxd), gfp_flags);
        if (!rxd)
                return -ENOMEM;

        if (req->src != req->dst) {
                diff_dst = true;
                dir_src = DMA_TO_DEVICE;
        } else {
                diff_dst = false;
                dir_src = DMA_BIDIRECTIONAL;
        }

        /*
         * SA2UL has an interesting feature where the receive DMA channel
         * is selected based on the size of the data passed to the engine.
         * Within the transition range, there is also a window where it is
         * impossible to determine which channel the data will end up on,
         * and this must be avoided. That case is handled by the SW
         * fallback mechanism in the individual algorithm implementations.
         */
        if (req->size >= 256)
                dma_rx = pdata->dma_rx2;
        else
                dma_rx = pdata->dma_rx1;
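        /*
         * Illustrative example: with the 256-byte threshold above, a
         * 200-byte request is received on dma_rx1 and a 400-byte one on
         * dma_rx2. Sizes inside the unsafe window (SA_UNSAFE_DATA_SZ_MIN
         * to SA_UNSAFE_DATA_SZ_MAX) never reach this point; callers such
         * as sa_cipher_run() divert them to the software fallback first.
         */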

        ddev = dma_rx->device->dev;
        rxd->ddev = ddev;

        memcpy(cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size);

        sa_update_cmdl(req, cmdl, &sa_ctx->cmdl_upd_info);

        if (req->type != CRYPTO_ALG_TYPE_AHASH) {
                if (req->enc)
                        req->type |=
                                (SA_REQ_SUBTYPE_ENC << SA_REQ_SUBTYPE_SHIFT);
                else
                        req->type |=
                                (SA_REQ_SUBTYPE_DEC << SA_REQ_SUBTYPE_SHIFT);
        }

        cmdl[sa_ctx->cmdl_size / sizeof(u32)] = req->type;

        /*
         * Map the packets, first we check if the data fits into a single
         * sg entry and use that if possible. If it does not fit, we check
         * if we need to do sg_split to align the scatterlist data on the
         * actual data size being processed by the crypto engine.
         */
        src = req->src;
        sg_nents = sg_nents_for_len(src, req->size);

        split_size = req->size;

        mapped_sg = &rxd->mapped_sg[0];
        if (sg_nents == 1 && split_size <= req->src->length) {
                src = &mapped_sg->static_sg;
                src_nents = 1;
                sg_init_table(src, 1);
                sg_set_page(src, sg_page(req->src), split_size,
                            req->src->offset);

                mapped_sg->sgt.sgl = src;
                mapped_sg->sgt.orig_nents = src_nents;
                ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
                if (ret)
                        return ret;

                mapped_sg->dir = dir_src;
                mapped_sg->mapped = true;
        } else {
                mapped_sg->sgt.sgl = req->src;
                mapped_sg->sgt.orig_nents = sg_nents;
                ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
                if (ret)
                        return ret;

                mapped_sg->dir = dir_src;
                mapped_sg->mapped = true;

                ret = sg_split(mapped_sg->sgt.sgl, mapped_sg->sgt.nents, 0, 1,
                               &split_size, &src, &src_nents, gfp_flags);
                if (ret) {
                        src_nents = mapped_sg->sgt.nents;
                        src = mapped_sg->sgt.sgl;
                } else {
                        mapped_sg->split_sg = src;
                }
        }

        dma_sync_sgtable_for_device(ddev, &mapped_sg->sgt, DMA_TO_DEVICE);

        if (!diff_dst) {
                dst_nents = src_nents;
                dst = src;
        } else {
                dst_nents = sg_nents_for_len(req->dst, req->size);
                mapped_sg = &rxd->mapped_sg[1];

                if (dst_nents == 1 && split_size <= req->dst->length) {
                        dst = &mapped_sg->static_sg;
                        dst_nents = 1;
                        sg_init_table(dst, 1);
                        sg_set_page(dst, sg_page(req->dst), split_size,
                                    req->dst->offset);

                        mapped_sg->sgt.sgl = dst;
                        mapped_sg->sgt.orig_nents = dst_nents;
                        ret = dma_map_sgtable(ddev, &mapped_sg->sgt,
                                              DMA_FROM_DEVICE, 0);
                        if (ret)
                                goto err_cleanup;

                        mapped_sg->dir = DMA_FROM_DEVICE;
                        mapped_sg->mapped = true;
                } else {
                        mapped_sg->sgt.sgl = req->dst;
                        mapped_sg->sgt.orig_nents = dst_nents;
                        ret = dma_map_sgtable(ddev, &mapped_sg->sgt,
                                              DMA_FROM_DEVICE, 0);
                        if (ret)
                                goto err_cleanup;

                        mapped_sg->dir = DMA_FROM_DEVICE;
                        mapped_sg->mapped = true;

                        ret = sg_split(mapped_sg->sgt.sgl, mapped_sg->sgt.nents,
                                       0, 1, &split_size, &dst, &dst_nents,
                                       gfp_flags);
                        if (ret) {
                                dst_nents = mapped_sg->sgt.nents;
                                dst = mapped_sg->sgt.sgl;
                        } else {
                                mapped_sg->split_sg = dst;
                        }
                }
        }

        rxd->tx_in = dmaengine_prep_slave_sg(dma_rx, dst, dst_nents,
                                             DMA_DEV_TO_MEM,
                                             DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!rxd->tx_in) {
                dev_err(pdata->dev, "IN prep_slave_sg() failed\n");
                ret = -EINVAL;
                goto err_cleanup;
        }

        rxd->req = (void *)req->base;
        rxd->enc = req->enc;
        rxd->iv_idx = req->ctx->iv_idx;
        rxd->enc_iv_size = sa_ctx->cmdl_upd_info.enc_iv.size;
        rxd->tx_in->callback = req->callback;
        rxd->tx_in->callback_param = rxd;

        tx_out = dmaengine_prep_slave_sg(pdata->dma_tx, src,
                                         src_nents, DMA_MEM_TO_DEV,
                                         DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

        if (!tx_out) {
                dev_err(pdata->dev, "OUT prep_slave_sg() failed\n");
                ret = -EINVAL;
                goto err_cleanup;
        }

        /*
         * Prepare metadata for DMA engine. This essentially describes the
         * crypto algorithm to be used, data sizes, different keys etc.
         */
        mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(tx_out, &pl, &ml);

        sa_prepare_tx_desc(mdptr, (sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS *
                                   sizeof(u32))), cmdl, sizeof(sa_ctx->epib),
                           sa_ctx->epib);

        ml = sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS * sizeof(u32));
        dmaengine_desc_set_metadata_len(tx_out, req->mdata_size);

        dmaengine_submit(tx_out);
        dmaengine_submit(rxd->tx_in);

        dma_async_issue_pending(dma_rx);
        dma_async_issue_pending(pdata->dma_tx);

        return -EINPROGRESS;

err_cleanup:
        sa_free_sa_rx_data(rxd);

        return ret;
}

static int sa_cipher_run(struct skcipher_request *req, u8 *iv, int enc)
{
        struct sa_tfm_ctx *ctx =
            crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
        struct crypto_alg *alg = req->base.tfm->__crt_alg;
        struct sa_req sa_req = { 0 };

        if (!req->cryptlen)
                return 0;

        if (req->cryptlen % alg->cra_blocksize)
                return -EINVAL;

        /* Use SW fallback if the data size is not supported */
        if (req->cryptlen > SA_MAX_DATA_SZ ||
            (req->cryptlen >= SA_UNSAFE_DATA_SZ_MIN &&
             req->cryptlen <= SA_UNSAFE_DATA_SZ_MAX)) {
                struct skcipher_request *subreq = skcipher_request_ctx(req);

                skcipher_request_set_tfm(subreq, ctx->fallback.skcipher);
                skcipher_request_set_callback(subreq, req->base.flags,
                                              req->base.complete,
                                              req->base.data);
                skcipher_request_set_crypt(subreq, req->src, req->dst,
                                           req->cryptlen, req->iv);
                if (enc)
                        return crypto_skcipher_encrypt(subreq);
                else
                        return crypto_skcipher_decrypt(subreq);
        }

        sa_req.size = req->cryptlen;
        sa_req.enc_size = req->cryptlen;
        sa_req.src = req->src;
        sa_req.dst = req->dst;
        sa_req.enc_iv = iv;
        sa_req.type = CRYPTO_ALG_TYPE_SKCIPHER;
        sa_req.enc = enc;
        sa_req.callback = sa_aes_dma_in_callback;
        sa_req.mdata_size = 44;
        sa_req.base = &req->base;
        sa_req.ctx = ctx;

        return sa_run(&sa_req);
}

static int sa_encrypt(struct skcipher_request *req)
{
        return sa_cipher_run(req, req->iv, 1);
}

static int sa_decrypt(struct skcipher_request *req)
{
        return sa_cipher_run(req, req->iv, 0);
}

static void sa_sha_dma_in_callback(void *data)
{
        struct sa_rx_data *rxd = (struct sa_rx_data *)data;
        struct ahash_request *req;
        struct crypto_ahash *tfm;
        unsigned int authsize;
        int i;
        size_t ml, pl;
        u32 *result;
        __be32 *mdptr;

        sa_sync_from_device(rxd);
        req = container_of(rxd->req, struct ahash_request, base);
        tfm = crypto_ahash_reqtfm(req);
        authsize = crypto_ahash_digestsize(tfm);

        mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
        result = (u32 *)req->result;

        for (i = 0; i < (authsize / 4); i++)
                result[i] = be32_to_cpu(mdptr[i + 4]);

        sa_free_sa_rx_data(rxd);

        ahash_request_complete(req, 0);
}

static int zero_message_process(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        int sa_digest_size = crypto_ahash_digestsize(tfm);

        switch (sa_digest_size) {
        case SHA1_DIGEST_SIZE:
                memcpy(req->result, sha1_zero_message_hash, sa_digest_size);
                break;
        case SHA256_DIGEST_SIZE:
                memcpy(req->result, sha256_zero_message_hash, sa_digest_size);
                break;
        case SHA512_DIGEST_SIZE:
                memcpy(req->result, sha512_zero_message_hash, sa_digest_size);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int sa_sha_run(struct ahash_request *req)
{
        struct sa_tfm_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
        struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
        struct sa_req sa_req = { 0 };
        size_t auth_len;

        auth_len = req->nbytes;

        if (!auth_len)
                return zero_message_process(req);

        if (auth_len > SA_MAX_DATA_SZ ||
            (auth_len >= SA_UNSAFE_DATA_SZ_MIN &&
             auth_len <= SA_UNSAFE_DATA_SZ_MAX)) {
                struct ahash_request *subreq = &rctx->fallback_req;
                int ret = 0;

                ahash_request_set_tfm(subreq, ctx->fallback.ahash);
                subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

                crypto_ahash_init(subreq);

                subreq->nbytes = auth_len;
                subreq->src = req->src;
                subreq->result = req->result;

                ret |= crypto_ahash_update(subreq);

                subreq->nbytes = 0;

                ret |= crypto_ahash_final(subreq);

                return ret;
        }

        sa_req.size = auth_len;
        sa_req.auth_size = auth_len;
        sa_req.src = req->src;
        sa_req.dst = req->src;
        sa_req.enc = true;
        sa_req.type = CRYPTO_ALG_TYPE_AHASH;
        sa_req.callback = sa_sha_dma_in_callback;
        sa_req.mdata_size = 28;
        sa_req.ctx = ctx;
        sa_req.base = &req->base;

        return sa_run(&sa_req);
}

static int sa_sha_setup(struct sa_tfm_ctx *ctx, struct algo_data *ad)
{
        int bs = crypto_shash_blocksize(ctx->shash);
        int cmdl_len;
        struct sa_cmdl_cfg cfg;

        ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
        ad->auth_eng.eng_id = SA_ENG_ID_AM1;
        ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;

        memset(ctx->authkey, 0, bs);
        memset(&cfg, 0, sizeof(cfg));
        cfg.aalg = ad->aalg_id;
        cfg.enc_eng_id = ad->enc_eng.eng_id;
        cfg.auth_eng_id = ad->auth_eng.eng_id;
        cfg.iv_size = 0;
        cfg.akey = NULL;
        cfg.akey_len = 0;

        /* Setup Encryption Security Context & Command label template */
        if (sa_init_sc(&ctx->enc, NULL, 0, NULL, 0, ad, 0,
                       &ctx->enc.epib[1]))
                goto badkey;

        cmdl_len = sa_format_cmdl_gen(&cfg,
                                      (u8 *)ctx->enc.cmdl,
                                      &ctx->enc.cmdl_upd_info);
        if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
                goto badkey;

        ctx->enc.cmdl_size = cmdl_len;

        return 0;

badkey:
        dev_err(sa_k3_dev, "%s: badkey\n", __func__);
        return -EINVAL;
}
1468
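/*
 * Common tfm init for the SHA algorithms: allocates the security-context
 * slot plus shash and ahash handles for the base algorithm; the ahash
 * serves as the software fallback.
 */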
1469 static int sa_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
1470 {
1471         struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1472         struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1473         int ret;
1474
1475         memset(ctx, 0, sizeof(*ctx));
1476         ctx->dev_data = data;
1477         ret = sa_init_ctx_info(&ctx->enc, data);
1478         if (ret)
1479                 return ret;
1480
	if (alg_base) {
		ctx->shash = crypto_alloc_shash(alg_base, 0,
						CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(ctx->shash)) {
			dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n",
				alg_base);
			ret = PTR_ERR(ctx->shash);
			goto err_free_ctx;
		}
		/* for fallback */
		ctx->fallback.ahash =
			crypto_alloc_ahash(alg_base, 0,
					   CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(ctx->fallback.ahash)) {
			dev_err(ctx->dev_data->dev,
				"Could not load fallback driver\n");
			ret = PTR_ERR(ctx->fallback.ahash);
			goto err_free_shash;
		}
	}
1499
1500         dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1501                 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1502                 ctx->dec.sc_id, &ctx->dec.sc_phys);
1503
1504         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1505                                  sizeof(struct sa_sha_req_ctx) +
1506                                  crypto_ahash_reqsize(ctx->fallback.ahash));
1507
	return 0;

err_free_shash:
	crypto_free_shash(ctx->shash);
err_free_ctx:
	sa_free_ctx_info(&ctx->enc, data);
	return ret;
}
1510
1511 static int sa_sha_digest(struct ahash_request *req)
1512 {
1513         return sa_sha_run(req);
1514 }
1515
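/*
 * Only one-shot digests (sa_sha_digest() above) are offloaded to the
 * accelerator; the incremental init/update/final/finup and the
 * export/import operations below are delegated to the software fallback.
 */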
1516 static int sa_sha_init(struct ahash_request *req)
1517 {
1518         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1519         struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1520         struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1521
1522         dev_dbg(sa_k3_dev, "init: digest size: %u, rctx=%p\n",
1523                 crypto_ahash_digestsize(tfm), rctx);
1524
1525         ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1526         rctx->fallback_req.base.flags =
1527                 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1528
1529         return crypto_ahash_init(&rctx->fallback_req);
1530 }
1531
1532 static int sa_sha_update(struct ahash_request *req)
1533 {
1534         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1535         struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1536         struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1537
1538         ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1539         rctx->fallback_req.base.flags =
1540                 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1541         rctx->fallback_req.nbytes = req->nbytes;
1542         rctx->fallback_req.src = req->src;
1543
1544         return crypto_ahash_update(&rctx->fallback_req);
1545 }
1546
1547 static int sa_sha_final(struct ahash_request *req)
1548 {
1549         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1550         struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1551         struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1552
1553         ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1554         rctx->fallback_req.base.flags =
1555                 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1556         rctx->fallback_req.result = req->result;
1557
1558         return crypto_ahash_final(&rctx->fallback_req);
1559 }
1560
1561 static int sa_sha_finup(struct ahash_request *req)
1562 {
1563         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1564         struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1565         struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1566
1567         ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1568         rctx->fallback_req.base.flags =
1569                 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1570
1571         rctx->fallback_req.nbytes = req->nbytes;
1572         rctx->fallback_req.src = req->src;
1573         rctx->fallback_req.result = req->result;
1574
1575         return crypto_ahash_finup(&rctx->fallback_req);
1576 }
1577
1578 static int sa_sha_import(struct ahash_request *req, const void *in)
1579 {
1580         struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1581         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1582         struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1583
1584         ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1585         rctx->fallback_req.base.flags = req->base.flags &
1586                 CRYPTO_TFM_REQ_MAY_SLEEP;
1587
1588         return crypto_ahash_import(&rctx->fallback_req, in);
1589 }
1590
1591 static int sa_sha_export(struct ahash_request *req, void *out)
1592 {
1593         struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1594         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1595         struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1596         struct ahash_request *subreq = &rctx->fallback_req;
1597
1598         ahash_request_set_tfm(subreq, ctx->fallback.ahash);
1599         subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1600
1601         return crypto_ahash_export(subreq, out);
1602 }
1603
static int sa_sha1_cra_init(struct crypto_tfm *tfm)
{
	struct algo_data ad = { 0 };
	struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
	int ret;

	ret = sa_sha_cra_init_alg(tfm, "sha1");
	if (ret)
		return ret;

	ad.aalg_id = SA_AALG_ID_SHA1;
	ad.hash_size = SHA1_DIGEST_SIZE;
	ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1;

	return sa_sha_setup(ctx, &ad);
}
1619
static int sa_sha256_cra_init(struct crypto_tfm *tfm)
{
	struct algo_data ad = { 0 };
	struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
	int ret;

	ret = sa_sha_cra_init_alg(tfm, "sha256");
	if (ret)
		return ret;

	ad.aalg_id = SA_AALG_ID_SHA2_256;
	ad.hash_size = SHA256_DIGEST_SIZE;
	ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256;

	return sa_sha_setup(ctx, &ad);
}
1635
static int sa_sha512_cra_init(struct crypto_tfm *tfm)
{
	struct algo_data ad = { 0 };
	struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
	int ret;

	ret = sa_sha_cra_init_alg(tfm, "sha512");
	if (ret)
		return ret;

	ad.aalg_id = SA_AALG_ID_SHA2_512;
	ad.hash_size = SHA512_DIGEST_SIZE;
	ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA512;

	return sa_sha_setup(ctx, &ad);
}
1651
1652 static void sa_sha_cra_exit(struct crypto_tfm *tfm)
1653 {
1654         struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1655         struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1656
1657         dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1658                 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1659                 ctx->dec.sc_id, &ctx->dec.sc_phys);
1660
1661         if (crypto_tfm_alg_type(tfm) == CRYPTO_ALG_TYPE_AHASH)
1662                 sa_free_ctx_info(&ctx->enc, data);
1663
1664         crypto_free_shash(ctx->shash);
1665         crypto_free_ahash(ctx->fallback.ahash);
1666 }
1667
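/*
 * DMA completion callback for AEAD: the engine returns the computed
 * authentication tag in the descriptor metadata (mdptr[4..], byte-swapped
 * below). For encryption the tag is appended to the destination buffer;
 * for decryption it is compared against the tag at the tail of the source,
 * and a mismatch completes the request with -EBADMSG.
 */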
1668 static void sa_aead_dma_in_callback(void *data)
1669 {
1670         struct sa_rx_data *rxd = (struct sa_rx_data *)data;
1671         struct aead_request *req;
1672         struct crypto_aead *tfm;
1673         unsigned int start;
1674         unsigned int authsize;
1675         u8 auth_tag[SA_MAX_AUTH_TAG_SZ];
1676         size_t pl, ml;
1677         int i;
1678         int err = 0;
1680         u32 *mdptr;
1681
1682         sa_sync_from_device(rxd);
1683         req = container_of(rxd->req, struct aead_request, base);
1684         tfm = crypto_aead_reqtfm(req);
1685         start = req->assoclen + req->cryptlen;
1686         authsize = crypto_aead_authsize(tfm);
1687
1688         mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
1689         for (i = 0; i < (authsize / 4); i++)
1690                 mdptr[i + 4] = swab32(mdptr[i + 4]);
1693
1694         if (rxd->enc) {
1695                 scatterwalk_map_and_copy(&mdptr[4], req->dst, start, authsize,
1696                                          1);
1697         } else {
1699                 start -= authsize;
1700                 scatterwalk_map_and_copy(auth_tag, req->src, start, authsize,
1701                                          0);
1702
1703                 err = memcmp(&mdptr[4], auth_tag, authsize) ? -EBADMSG : 0;
1704         }
1705
1706         sa_free_sa_rx_data(rxd);
1707
1708         aead_request_complete(req, err);
1709 }
1710
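/*
 * Common AEAD tfm init: allocates a shash for the base hash (used when
 * preparing the HMAC iopads), the fallback AEAD handle, and the
 * encrypt/decrypt security-context slots.
 */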
1711 static int sa_cra_init_aead(struct crypto_aead *tfm, const char *hash,
1712                             const char *fallback)
1713 {
1714         struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1715         struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1716         int ret;
1717
1718         memzero_explicit(ctx, sizeof(*ctx));
1719
1720         ctx->shash = crypto_alloc_shash(hash, 0, CRYPTO_ALG_NEED_FALLBACK);
1721         if (IS_ERR(ctx->shash)) {
1722                 dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n", hash);
1723                 return PTR_ERR(ctx->shash);
1724         }
1725
	ctx->fallback.aead = crypto_alloc_aead(fallback, 0,
					       CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback.aead)) {
		dev_err(sa_k3_dev, "fallback driver %s couldn't be loaded\n",
			fallback);
		ret = PTR_ERR(ctx->fallback.aead);
		goto err_free_shash;
	}

	crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
				crypto_aead_reqsize(ctx->fallback.aead));

	ret = sa_init_ctx_info(&ctx->enc, data);
	if (ret)
		goto err_free_aead;

	ret = sa_init_ctx_info(&ctx->dec, data);
	if (ret)
		goto err_free_enc;
1747
1748         dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1749                 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1750                 ctx->dec.sc_id, &ctx->dec.sc_phys);
1751
	return 0;

err_free_enc:
	sa_free_ctx_info(&ctx->enc, data);
err_free_aead:
	crypto_free_aead(ctx->fallback.aead);
err_free_shash:
	crypto_free_shash(ctx->shash);
	return ret;
1753 }
1754
1755 static int sa_cra_init_aead_sha1(struct crypto_aead *tfm)
1756 {
1757         return sa_cra_init_aead(tfm, "sha1",
1758                                 "authenc(hmac(sha1-ce),cbc(aes-ce))");
1759 }
1760
1761 static int sa_cra_init_aead_sha256(struct crypto_aead *tfm)
1762 {
1763         return sa_cra_init_aead(tfm, "sha256",
1764                                 "authenc(hmac(sha256-ce),cbc(aes-ce))");
1765 }
1766
1767 static void sa_exit_tfm_aead(struct crypto_aead *tfm)
1768 {
1769         struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1770         struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1771
1772         crypto_free_shash(ctx->shash);
1773         crypto_free_aead(ctx->fallback.aead);
1774
1775         sa_free_ctx_info(&ctx->enc, data);
1776         sa_free_ctx_info(&ctx->dec, data);
1777 }
1778
1779 /* AEAD algorithm configuration interface function */
1780 static int sa_aead_setkey(struct crypto_aead *authenc,
1781                           const u8 *key, unsigned int keylen,
1782                           struct algo_data *ad)
1783 {
1784         struct sa_tfm_ctx *ctx = crypto_aead_ctx(authenc);
1785         struct crypto_authenc_keys keys;
1786         int cmdl_len;
1787         struct sa_cmdl_cfg cfg;
1788         int key_idx;
1789
1790         if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
1791                 return -EINVAL;
1792
1793         /* Convert the key size (16/24/32) to the key size index (0/1/2) */
1794         key_idx = (keys.enckeylen >> 3) - 2;
	if (key_idx < 0 || key_idx >= 3)
1796                 return -EINVAL;
1797
1798         ad->ctx = ctx;
1799         ad->enc_eng.eng_id = SA_ENG_ID_EM1;
1800         ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
1801         ad->auth_eng.eng_id = SA_ENG_ID_AM1;
1802         ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
1803         ad->mci_enc = mci_cbc_enc_no_iv_array[key_idx];
1804         ad->mci_dec = mci_cbc_dec_no_iv_array[key_idx];
1805         ad->inv_key = true;
1806         ad->keyed_mac = true;
1807         ad->ealg_id = SA_EALG_ID_AES_CBC;
1808         ad->prep_iopad = sa_prepare_iopads;
1809
1810         memset(&cfg, 0, sizeof(cfg));
1811         cfg.enc = true;
1812         cfg.aalg = ad->aalg_id;
1813         cfg.enc_eng_id = ad->enc_eng.eng_id;
1814         cfg.auth_eng_id = ad->auth_eng.eng_id;
1815         cfg.iv_size = crypto_aead_ivsize(authenc);
1816         cfg.akey = keys.authkey;
1817         cfg.akey_len = keys.authkeylen;
1818
1819         /* Setup Encryption Security Context & Command label template */
1820         if (sa_init_sc(&ctx->enc, keys.enckey, keys.enckeylen,
1821                        keys.authkey, keys.authkeylen,
1822                        ad, 1, &ctx->enc.epib[1]))
1823                 return -EINVAL;
1824
1825         cmdl_len = sa_format_cmdl_gen(&cfg,
1826                                       (u8 *)ctx->enc.cmdl,
1827                                       &ctx->enc.cmdl_upd_info);
1828         if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
1829                 return -EINVAL;
1830
1831         ctx->enc.cmdl_size = cmdl_len;
1832
1833         /* Setup Decryption Security Context & Command label template */
1834         if (sa_init_sc(&ctx->dec, keys.enckey, keys.enckeylen,
1835                        keys.authkey, keys.authkeylen,
1836                        ad, 0, &ctx->dec.epib[1]))
1837                 return -EINVAL;
1838
1839         cfg.enc = false;
1840         cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
1841                                       &ctx->dec.cmdl_upd_info);
1842
1843         if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
1844                 return -EINVAL;
1845
1846         ctx->dec.cmdl_size = cmdl_len;
1847
1848         crypto_aead_clear_flags(ctx->fallback.aead, CRYPTO_TFM_REQ_MASK);
1849         crypto_aead_set_flags(ctx->fallback.aead,
1850                               crypto_aead_get_flags(authenc) &
1851                               CRYPTO_TFM_REQ_MASK);
	return crypto_aead_setkey(ctx->fallback.aead, key, keylen);
1855 }
1856
1857 static int sa_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
1858 {
	struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1860
1861         return crypto_aead_setauthsize(ctx->fallback.aead, authsize);
1862 }
1863
1864 static int sa_aead_cbc_sha1_setkey(struct crypto_aead *authenc,
1865                                    const u8 *key, unsigned int keylen)
1866 {
1867         struct algo_data ad = { 0 };
1868
1869         ad.ealg_id = SA_EALG_ID_AES_CBC;
1870         ad.aalg_id = SA_AALG_ID_HMAC_SHA1;
1871         ad.hash_size = SHA1_DIGEST_SIZE;
1872         ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1;
1873
1874         return sa_aead_setkey(authenc, key, keylen, &ad);
1875 }
1876
1877 static int sa_aead_cbc_sha256_setkey(struct crypto_aead *authenc,
1878                                      const u8 *key, unsigned int keylen)
1879 {
1880         struct algo_data ad = { 0 };
1881
1882         ad.ealg_id = SA_EALG_ID_AES_CBC;
1883         ad.aalg_id = SA_AALG_ID_HMAC_SHA2_256;
1884         ad.hash_size = SHA256_DIGEST_SIZE;
1885         ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256;
1886
1887         return sa_aead_setkey(authenc, key, keylen, &ad);
1888 }
1889
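/*
 * Common AEAD encrypt/decrypt path. For decryption the authentication tag
 * at the tail of the payload is excluded from the sizes handed to the
 * engine. Requests the hardware cannot handle safely are routed to the
 * fallback AEAD implementation.
 */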
1890 static int sa_aead_run(struct aead_request *req, u8 *iv, int enc)
1891 {
1892         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1893         struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1894         struct sa_req sa_req = { 0 };
1895         size_t auth_size, enc_size;
1896
1897         enc_size = req->cryptlen;
1898         auth_size = req->assoclen + req->cryptlen;
1899
1900         if (!enc) {
1901                 enc_size -= crypto_aead_authsize(tfm);
1902                 auth_size -= crypto_aead_authsize(tfm);
1903         }
1904
1905         if (auth_size > SA_MAX_DATA_SZ ||
1906             (auth_size >= SA_UNSAFE_DATA_SZ_MIN &&
1907              auth_size <= SA_UNSAFE_DATA_SZ_MAX)) {
1908                 struct aead_request *subreq = aead_request_ctx(req);
1909                 int ret;
1910
1911                 aead_request_set_tfm(subreq, ctx->fallback.aead);
1912                 aead_request_set_callback(subreq, req->base.flags,
1913                                           req->base.complete, req->base.data);
1914                 aead_request_set_crypt(subreq, req->src, req->dst,
1915                                        req->cryptlen, req->iv);
1916                 aead_request_set_ad(subreq, req->assoclen);
1917
1918                 ret = enc ? crypto_aead_encrypt(subreq) :
1919                         crypto_aead_decrypt(subreq);
1920                 return ret;
1921         }
1922
1923         sa_req.enc_offset = req->assoclen;
1924         sa_req.enc_size = enc_size;
1925         sa_req.auth_size = auth_size;
1926         sa_req.size = auth_size;
1927         sa_req.enc_iv = iv;
1928         sa_req.type = CRYPTO_ALG_TYPE_AEAD;
1929         sa_req.enc = enc;
1930         sa_req.callback = sa_aead_dma_in_callback;
1931         sa_req.mdata_size = 52;
1932         sa_req.base = &req->base;
1933         sa_req.ctx = ctx;
1934         sa_req.src = req->src;
1935         sa_req.dst = req->dst;
1936
1937         return sa_run(&sa_req);
1938 }
1939
1940 /* AEAD algorithm encrypt interface function */
1941 static int sa_aead_encrypt(struct aead_request *req)
1942 {
1943         return sa_aead_run(req, req->iv, 1);
1944 }
1945
1946 /* AEAD algorithm decrypt interface function */
1947 static int sa_aead_decrypt(struct aead_request *req)
1948 {
1949         return sa_aead_run(req, req->iv, 0);
1950 }
1951
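/*
 * Algorithms exposed to the crypto framework: ECB/CBC modes of AES and
 * 3DES, SHA1/SHA256/SHA512 hashes, and authenc(hmac(sha1/sha256),cbc(aes))
 * AEADs. All set CRYPTO_ALG_NEED_FALLBACK and keep a software fallback for
 * the request sizes the engine cannot process.
 */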
1952 static struct sa_alg_tmpl sa_algs[] = {
1953         {
1954                 .type = CRYPTO_ALG_TYPE_SKCIPHER,
1955                 .alg.skcipher = {
1956                         .base.cra_name          = "cbc(aes)",
1957                         .base.cra_driver_name   = "cbc-aes-sa2ul",
1958                         .base.cra_priority      = 30000,
1959                         .base.cra_flags         = CRYPTO_ALG_TYPE_SKCIPHER |
1960                                                   CRYPTO_ALG_KERN_DRIVER_ONLY |
1961                                                   CRYPTO_ALG_ASYNC |
1962                                                   CRYPTO_ALG_NEED_FALLBACK,
1963                         .base.cra_blocksize     = AES_BLOCK_SIZE,
1964                         .base.cra_ctxsize       = sizeof(struct sa_tfm_ctx),
1965                         .base.cra_module        = THIS_MODULE,
1966                         .init                   = sa_cipher_cra_init,
1967                         .exit                   = sa_cipher_cra_exit,
1968                         .min_keysize            = AES_MIN_KEY_SIZE,
1969                         .max_keysize            = AES_MAX_KEY_SIZE,
1970                         .ivsize                 = AES_BLOCK_SIZE,
1971                         .setkey                 = sa_aes_cbc_setkey,
1972                         .encrypt                = sa_encrypt,
1973                         .decrypt                = sa_decrypt,
1974                 }
1975         },
1976         {
1977                 .type = CRYPTO_ALG_TYPE_SKCIPHER,
1978                 .alg.skcipher = {
1979                         .base.cra_name          = "ecb(aes)",
1980                         .base.cra_driver_name   = "ecb-aes-sa2ul",
1981                         .base.cra_priority      = 30000,
1982                         .base.cra_flags         = CRYPTO_ALG_TYPE_SKCIPHER |
1983                                                   CRYPTO_ALG_KERN_DRIVER_ONLY |
1984                                                   CRYPTO_ALG_ASYNC |
1985                                                   CRYPTO_ALG_NEED_FALLBACK,
1986                         .base.cra_blocksize     = AES_BLOCK_SIZE,
1987                         .base.cra_ctxsize       = sizeof(struct sa_tfm_ctx),
1988                         .base.cra_module        = THIS_MODULE,
1989                         .init                   = sa_cipher_cra_init,
1990                         .exit                   = sa_cipher_cra_exit,
1991                         .min_keysize            = AES_MIN_KEY_SIZE,
1992                         .max_keysize            = AES_MAX_KEY_SIZE,
1993                         .setkey                 = sa_aes_ecb_setkey,
1994                         .encrypt                = sa_encrypt,
1995                         .decrypt                = sa_decrypt,
1996                 }
1997         },
1998         {
1999                 .type = CRYPTO_ALG_TYPE_SKCIPHER,
2000                 .alg.skcipher = {
2001                         .base.cra_name          = "cbc(des3_ede)",
2002                         .base.cra_driver_name   = "cbc-des3-sa2ul",
2003                         .base.cra_priority      = 30000,
2004                         .base.cra_flags         = CRYPTO_ALG_TYPE_SKCIPHER |
2005                                                   CRYPTO_ALG_KERN_DRIVER_ONLY |
2006                                                   CRYPTO_ALG_ASYNC |
2007                                                   CRYPTO_ALG_NEED_FALLBACK,
2008                         .base.cra_blocksize     = DES_BLOCK_SIZE,
2009                         .base.cra_ctxsize       = sizeof(struct sa_tfm_ctx),
2010                         .base.cra_module        = THIS_MODULE,
2011                         .init                   = sa_cipher_cra_init,
2012                         .exit                   = sa_cipher_cra_exit,
2013                         .min_keysize            = 3 * DES_KEY_SIZE,
2014                         .max_keysize            = 3 * DES_KEY_SIZE,
2015                         .ivsize                 = DES_BLOCK_SIZE,
2016                         .setkey                 = sa_3des_cbc_setkey,
2017                         .encrypt                = sa_encrypt,
2018                         .decrypt                = sa_decrypt,
2019                 }
2020         },
2021         {
2022                 .type = CRYPTO_ALG_TYPE_SKCIPHER,
2023                 .alg.skcipher = {
2024                         .base.cra_name          = "ecb(des3_ede)",
2025                         .base.cra_driver_name   = "ecb-des3-sa2ul",
2026                         .base.cra_priority      = 30000,
2027                         .base.cra_flags         = CRYPTO_ALG_TYPE_SKCIPHER |
2028                                                   CRYPTO_ALG_KERN_DRIVER_ONLY |
2029                                                   CRYPTO_ALG_ASYNC |
2030                                                   CRYPTO_ALG_NEED_FALLBACK,
2031                         .base.cra_blocksize     = DES_BLOCK_SIZE,
2032                         .base.cra_ctxsize       = sizeof(struct sa_tfm_ctx),
2033                         .base.cra_module        = THIS_MODULE,
2034                         .init                   = sa_cipher_cra_init,
2035                         .exit                   = sa_cipher_cra_exit,
2036                         .min_keysize            = 3 * DES_KEY_SIZE,
2037                         .max_keysize            = 3 * DES_KEY_SIZE,
2038                         .setkey                 = sa_3des_ecb_setkey,
2039                         .encrypt                = sa_encrypt,
2040                         .decrypt                = sa_decrypt,
2041                 }
2042         },
2043         {
2044                 .type = CRYPTO_ALG_TYPE_AHASH,
2045                 .alg.ahash = {
2046                         .halg.base = {
2047                                 .cra_name       = "sha1",
2048                                 .cra_driver_name        = "sha1-sa2ul",
2049                                 .cra_priority   = 400,
2050                                 .cra_flags      = CRYPTO_ALG_TYPE_AHASH |
2051                                                   CRYPTO_ALG_ASYNC |
2052                                                   CRYPTO_ALG_KERN_DRIVER_ONLY |
2053                                                   CRYPTO_ALG_NEED_FALLBACK,
2054                                 .cra_blocksize  = SHA1_BLOCK_SIZE,
2055                                 .cra_ctxsize    = sizeof(struct sa_tfm_ctx),
2056                                 .cra_module     = THIS_MODULE,
2057                                 .cra_init       = sa_sha1_cra_init,
2058                                 .cra_exit       = sa_sha_cra_exit,
2059                         },
2060                         .halg.digestsize        = SHA1_DIGEST_SIZE,
2061                         .halg.statesize         = sizeof(struct sa_sha_req_ctx) +
2062                                                   sizeof(struct sha1_state),
2063                         .init                   = sa_sha_init,
2064                         .update                 = sa_sha_update,
2065                         .final                  = sa_sha_final,
2066                         .finup                  = sa_sha_finup,
2067                         .digest                 = sa_sha_digest,
2068                         .export                 = sa_sha_export,
2069                         .import                 = sa_sha_import,
2070                 },
2071         },
2072         {
2073                 .type = CRYPTO_ALG_TYPE_AHASH,
2074                 .alg.ahash = {
2075                         .halg.base = {
2076                                 .cra_name       = "sha256",
2077                                 .cra_driver_name        = "sha256-sa2ul",
2078                                 .cra_priority   = 400,
2079                                 .cra_flags      = CRYPTO_ALG_TYPE_AHASH |
2080                                                   CRYPTO_ALG_ASYNC |
2081                                                   CRYPTO_ALG_KERN_DRIVER_ONLY |
2082                                                   CRYPTO_ALG_NEED_FALLBACK,
2083                                 .cra_blocksize  = SHA256_BLOCK_SIZE,
2084                                 .cra_ctxsize    = sizeof(struct sa_tfm_ctx),
2085                                 .cra_module     = THIS_MODULE,
2086                                 .cra_init       = sa_sha256_cra_init,
2087                                 .cra_exit       = sa_sha_cra_exit,
2088                         },
2089                         .halg.digestsize        = SHA256_DIGEST_SIZE,
2090                         .halg.statesize         = sizeof(struct sa_sha_req_ctx) +
2091                                                   sizeof(struct sha256_state),
2092                         .init                   = sa_sha_init,
2093                         .update                 = sa_sha_update,
2094                         .final                  = sa_sha_final,
2095                         .finup                  = sa_sha_finup,
2096                         .digest                 = sa_sha_digest,
2097                         .export                 = sa_sha_export,
2098                         .import                 = sa_sha_import,
2099                 },
2100         },
2101         {
2102                 .type = CRYPTO_ALG_TYPE_AHASH,
2103                 .alg.ahash = {
2104                         .halg.base = {
2105                                 .cra_name       = "sha512",
2106                                 .cra_driver_name        = "sha512-sa2ul",
2107                                 .cra_priority   = 400,
2108                                 .cra_flags      = CRYPTO_ALG_TYPE_AHASH |
2109                                                   CRYPTO_ALG_ASYNC |
2110                                                   CRYPTO_ALG_KERN_DRIVER_ONLY |
2111                                                   CRYPTO_ALG_NEED_FALLBACK,
2112                                 .cra_blocksize  = SHA512_BLOCK_SIZE,
2113                                 .cra_ctxsize    = sizeof(struct sa_tfm_ctx),
2114                                 .cra_module     = THIS_MODULE,
2115                                 .cra_init       = sa_sha512_cra_init,
2116                                 .cra_exit       = sa_sha_cra_exit,
2117                         },
2118                         .halg.digestsize        = SHA512_DIGEST_SIZE,
2119                         .halg.statesize         = sizeof(struct sa_sha_req_ctx) +
2120                                                   sizeof(struct sha512_state),
2121                         .init                   = sa_sha_init,
2122                         .update                 = sa_sha_update,
2123                         .final                  = sa_sha_final,
2124                         .finup                  = sa_sha_finup,
2125                         .digest                 = sa_sha_digest,
2126                         .export                 = sa_sha_export,
2127                         .import                 = sa_sha_import,
2128                 },
2129         },
2130         {
2131                 .type   = CRYPTO_ALG_TYPE_AEAD,
2132                 .alg.aead = {
2133                         .base = {
2134                                 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2135                                 .cra_driver_name =
2136                                         "authenc(hmac(sha1),cbc(aes))-sa2ul",
2137                                 .cra_blocksize = AES_BLOCK_SIZE,
2138                                 .cra_flags = CRYPTO_ALG_TYPE_AEAD |
2139                                         CRYPTO_ALG_KERN_DRIVER_ONLY |
2140                                         CRYPTO_ALG_ASYNC |
2141                                         CRYPTO_ALG_NEED_FALLBACK,
2142                                 .cra_ctxsize = sizeof(struct sa_tfm_ctx),
2143                                 .cra_module = THIS_MODULE,
2144                                 .cra_priority = 3000,
2145                         },
2146                         .ivsize = AES_BLOCK_SIZE,
2147                         .maxauthsize = SHA1_DIGEST_SIZE,
2148
2149                         .init = sa_cra_init_aead_sha1,
2150                         .exit = sa_exit_tfm_aead,
2151                         .setkey = sa_aead_cbc_sha1_setkey,
2152                         .setauthsize = sa_aead_setauthsize,
2153                         .encrypt = sa_aead_encrypt,
2154                         .decrypt = sa_aead_decrypt,
2155                 },
2156         },
2157         {
2158                 .type   = CRYPTO_ALG_TYPE_AEAD,
2159                 .alg.aead = {
2160                         .base = {
2161                                 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2162                                 .cra_driver_name =
2163                                         "authenc(hmac(sha256),cbc(aes))-sa2ul",
2164                                 .cra_blocksize = AES_BLOCK_SIZE,
2165                                 .cra_flags = CRYPTO_ALG_TYPE_AEAD |
2166                                         CRYPTO_ALG_KERN_DRIVER_ONLY |
2167                                         CRYPTO_ALG_ASYNC |
2168                                         CRYPTO_ALG_NEED_FALLBACK,
2169                                 .cra_ctxsize = sizeof(struct sa_tfm_ctx),
2170                                 .cra_module = THIS_MODULE,
2172                                 .cra_priority = 3000,
2173                         },
2174                         .ivsize = AES_BLOCK_SIZE,
2175                         .maxauthsize = SHA256_DIGEST_SIZE,
2176
2177                         .init = sa_cra_init_aead_sha256,
2178                         .exit = sa_exit_tfm_aead,
2179                         .setkey = sa_aead_cbc_sha256_setkey,
2180                         .setauthsize = sa_aead_setauthsize,
2181                         .encrypt = sa_aead_encrypt,
2182                         .decrypt = sa_aead_decrypt,
2183                 },
2184         },
2185 };
2186
2187 /* Register the algorithms in crypto framework */
2188 static void sa_register_algos(const struct device *dev)
2189 {
2190         char *alg_name;
2191         u32 type;
2192         int i, err;
2193
2194         for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
2195                 type = sa_algs[i].type;
2196                 if (type == CRYPTO_ALG_TYPE_SKCIPHER) {
2197                         alg_name = sa_algs[i].alg.skcipher.base.cra_name;
2198                         err = crypto_register_skcipher(&sa_algs[i].alg.skcipher);
2199                 } else if (type == CRYPTO_ALG_TYPE_AHASH) {
2200                         alg_name = sa_algs[i].alg.ahash.halg.base.cra_name;
2201                         err = crypto_register_ahash(&sa_algs[i].alg.ahash);
2202                 } else if (type == CRYPTO_ALG_TYPE_AEAD) {
2203                         alg_name = sa_algs[i].alg.aead.base.cra_name;
2204                         err = crypto_register_aead(&sa_algs[i].alg.aead);
2205                 } else {
			dev_err(dev,
				"unsupported crypto algorithm (%d)\n",
				sa_algs[i].type);
2209                         continue;
2210                 }
2211
2212                 if (err)
2213                         dev_err(dev, "Failed to register '%s'\n", alg_name);
2214                 else
2215                         sa_algs[i].registered = true;
2216         }
2217 }
2218
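/*
 * Illustrative usage sketch (not part of this driver): once registered,
 * the accelerated algorithms are reached through the normal kernel
 * crypto API, e.g.:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *
 * The crypto core picks "sha256-sa2ul" whenever its priority wins over
 * the other registered sha256 implementations.
 */
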
2219 /* Unregister the algorithms in crypto framework */
2220 static void sa_unregister_algos(const struct device *dev)
2221 {
2222         u32 type;
2223         int i;
2224
2225         for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
2226                 type = sa_algs[i].type;
2227                 if (!sa_algs[i].registered)
2228                         continue;
2229                 if (type == CRYPTO_ALG_TYPE_SKCIPHER)
2230                         crypto_unregister_skcipher(&sa_algs[i].alg.skcipher);
2231                 else if (type == CRYPTO_ALG_TYPE_AHASH)
2232                         crypto_unregister_ahash(&sa_algs[i].alg.ahash);
2233                 else if (type == CRYPTO_ALG_TYPE_AEAD)
2234                         crypto_unregister_aead(&sa_algs[i].alg.aead);
2235
2236                 sa_algs[i].registered = false;
2237         }
2238 }
2239
2240 static int sa_init_mem(struct sa_crypto_data *dev_data)
2241 {
2242         struct device *dev = &dev_data->pdev->dev;
2243         /* Setup dma pool for security context buffers */
2244         dev_data->sc_pool = dma_pool_create("keystone-sc", dev,
2245                                             SA_CTX_MAX_SZ, 64, 0);
2246         if (!dev_data->sc_pool) {
		dev_err(dev, "Failed to create dma pool\n");
2248                 return -ENOMEM;
2249         }
2250
2251         return 0;
2252 }
2253
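/*
 * Acquire and configure the DMA channels used by the engine: "tx" feeds
 * requests in, "rx1" and "rx2" bring results back. The hardware uses
 * 48-bit DMA addressing, hence the explicit DMA mask.
 */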
2254 static int sa_dma_init(struct sa_crypto_data *dd)
2255 {
2256         int ret;
2257         struct dma_slave_config cfg;
2258
2259         dd->dma_rx1 = NULL;
2260         dd->dma_tx = NULL;
2261         dd->dma_rx2 = NULL;
2262
2263         ret = dma_coerce_mask_and_coherent(dd->dev, DMA_BIT_MASK(48));
2264         if (ret)
2265                 return ret;
2266
2267         dd->dma_rx1 = dma_request_chan(dd->dev, "rx1");
2268         if (IS_ERR(dd->dma_rx1))
2269                 return dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx1),
2270                                      "Unable to request rx1 DMA channel\n");
2271
2272         dd->dma_rx2 = dma_request_chan(dd->dev, "rx2");
2273         if (IS_ERR(dd->dma_rx2)) {
2274                 dma_release_channel(dd->dma_rx1);
2275                 return dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx2),
2276                                      "Unable to request rx2 DMA channel\n");
2277         }
2278
2279         dd->dma_tx = dma_request_chan(dd->dev, "tx");
2280         if (IS_ERR(dd->dma_tx)) {
2281                 ret = dev_err_probe(dd->dev, PTR_ERR(dd->dma_tx),
2282                                     "Unable to request tx DMA channel\n");
2283                 goto err_dma_tx;
2284         }
2285
2286         memzero_explicit(&cfg, sizeof(cfg));
2287
2288         cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2289         cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2290         cfg.src_maxburst = 4;
2291         cfg.dst_maxburst = 4;
2292
	ret = dmaengine_slave_config(dd->dma_rx1, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
			ret);
		goto err_dma_config;
	}

	ret = dmaengine_slave_config(dd->dma_rx2, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
			ret);
		goto err_dma_config;
	}

	ret = dmaengine_slave_config(dd->dma_tx, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
			ret);
		goto err_dma_config;
	}

	return 0;

err_dma_config:
	dma_release_channel(dd->dma_tx);
err_dma_tx:
	dma_release_channel(dd->dma_rx1);
	dma_release_channel(dd->dma_rx2);

	return ret;
}
2322
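/*
 * Link each child device populated from the device tree (e.g. the RNG)
 * to the SA2UL parent so the child is only probed once the parent has
 * bound.
 */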
2323 static int sa_link_child(struct device *dev, void *data)
2324 {
2325         struct device *parent = data;
2326
2327         device_link_add(dev, parent, DL_FLAG_AUTOPROBE_CONSUMER);
2328
2329         return 0;
2330 }
2331
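/*
 * Probe: map the MMIO region, set up the security-context DMA pool and
 * DMA channels, enable the engine sub-blocks, register the algorithms
 * and populate child devices from the device tree.
 */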
2332 static int sa_ul_probe(struct platform_device *pdev)
2333 {
2334         struct device *dev = &pdev->dev;
2335         struct device_node *node = dev->of_node;
2336         struct resource *res;
	void __iomem *saul_base;
2338         struct sa_crypto_data *dev_data;
2339         u32 val;
2340         int ret;
2341
2342         dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL);
2343         if (!dev_data)
2344                 return -ENOMEM;
2345
2346         sa_k3_dev = dev;
2347         dev_data->dev = dev;
2348         dev_data->pdev = pdev;
2349         platform_set_drvdata(pdev, dev_data);
2350         dev_set_drvdata(sa_k3_dev, dev_data);
2351
2352         pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__,
			ret);
		pm_runtime_put_noidle(dev);
		pm_runtime_disable(dev);
		return ret;
	}
2359
	ret = sa_init_mem(dev_data);
	if (ret)
		goto disable_pm_runtime;

	ret = sa_dma_init(dev_data);
	if (ret)
		goto destroy_dma_pool;
2364
2365         spin_lock_init(&dev_data->scid_lock);
2366         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	saul_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(saul_base)) {
		ret = PTR_ERR(saul_base);
		goto release_dma;
	}

	dev_data->base = saul_base;
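
	/*
	 * Enable the engine sub-blocks: the encryption and authentication
	 * subsystems, the context cache, the ingress/egress packet ports
	 * and the TRNG.
	 */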
2370         val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN |
2371             SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN |
2372             SA_EEC_TRNG_EN;
2373
2374         writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL);
2375
2376         sa_register_algos(dev);
2377
2378         ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
2379         if (ret)
2380                 goto release_dma;
2381
2382         device_for_each_child(&pdev->dev, &pdev->dev, sa_link_child);
2383
2384         return 0;
2385
2386 release_dma:
2387         sa_unregister_algos(&pdev->dev);
2388
2389         dma_release_channel(dev_data->dma_rx2);
2390         dma_release_channel(dev_data->dma_rx1);
2391         dma_release_channel(dev_data->dma_tx);
2392
destroy_dma_pool:
	dma_pool_destroy(dev_data->sc_pool);
2394
2395 disable_pm_runtime:
2396         pm_runtime_put_sync(&pdev->dev);
2397         pm_runtime_disable(&pdev->dev);
2398
2399         return ret;
2400 }
2401
2402 static int sa_ul_remove(struct platform_device *pdev)
2403 {
	struct sa_crypto_data *dev_data = platform_get_drvdata(pdev);

	of_platform_depopulate(&pdev->dev);

	sa_unregister_algos(&pdev->dev);
2407
2408         dma_release_channel(dev_data->dma_rx2);
2409         dma_release_channel(dev_data->dma_rx1);
2410         dma_release_channel(dev_data->dma_tx);
2411
2412         dma_pool_destroy(dev_data->sc_pool);
2413
2414         platform_set_drvdata(pdev, NULL);
2415
2416         pm_runtime_put_sync(&pdev->dev);
2417         pm_runtime_disable(&pdev->dev);
2418
2419         return 0;
2420 }
2421
2422 static const struct of_device_id of_match[] = {
2423         {.compatible = "ti,j721e-sa2ul",},
2424         {.compatible = "ti,am654-sa2ul",},
2425         {},
2426 };
2427 MODULE_DEVICE_TABLE(of, of_match);
2428
2429 static struct platform_driver sa_ul_driver = {
2430         .probe = sa_ul_probe,
2431         .remove = sa_ul_remove,
2432         .driver = {
2433                    .name = "saul-crypto",
2434                    .of_match_table = of_match,
2435                    },
2436 };
2437 module_platform_driver(sa_ul_driver);
2438 MODULE_LICENSE("GPL v2");
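MODULE_DESCRIPTION("K3 SA2UL crypto accelerator driver");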