drivers/crypto/ccp/ccp-dev-v5.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2016,2017 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

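/* Key and context material for v5 operations is staged in LSB (local
 * storage block) slots on the device. Each LSB region holds LSB_SIZE
 * slots of LSB_ITEM_SIZE bytes; a queue may own one region privately,
 * and whatever regions are left over are shared between all queues
 * (see ccp_assign_lsbs()). The helpers below hand out and reclaim
 * slots from those maps.
 */
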
/* Allocate the requested number of contiguous LSB slots
 * from the LSB bitmap. Look in the private range for this
 * queue first; failing that, check the public area.
 * If no space is available, wait around.
 * Return: first slot number
 */
static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count)
{
        struct ccp_device *ccp;
        int start;

        /* First look at the map for the queue */
        if (cmd_q->lsb >= 0) {
                start = (u32)bitmap_find_next_zero_area(cmd_q->lsbmap,
                                                        LSB_SIZE,
                                                        0, count, 0);
                if (start < LSB_SIZE) {
                        bitmap_set(cmd_q->lsbmap, start, count);
                        return start + cmd_q->lsb * LSB_SIZE;
                }
        }

        /* No joy; try to get an entry from the shared blocks */
        ccp = cmd_q->ccp;
        for (;;) {
                mutex_lock(&ccp->sb_mutex);

                start = (u32)bitmap_find_next_zero_area(ccp->lsbmap,
                                                        MAX_LSB_CNT * LSB_SIZE,
                                                        0,
                                                        count, 0);
                if (start <= MAX_LSB_CNT * LSB_SIZE) {
                        bitmap_set(ccp->lsbmap, start, count);

                        mutex_unlock(&ccp->sb_mutex);
                        return start;
                }

                ccp->sb_avail = 0;

                mutex_unlock(&ccp->sb_mutex);

                /* Wait for KSB entries to become available */
                if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail))
                        return 0;
        }
}

/* Free a number of LSB slots from the bitmap, starting at
 * the indicated starting slot number.
 */
static void ccp_lsb_free(struct ccp_cmd_queue *cmd_q, unsigned int start,
                         unsigned int count)
{
        if (!start)
                return;

        if (cmd_q->lsb == start) {
                /* An entry from the private LSB */
                bitmap_clear(cmd_q->lsbmap, start, count);
        } else {
                /* From the shared LSBs */
                struct ccp_device *ccp = cmd_q->ccp;

                mutex_lock(&ccp->sb_mutex);
                bitmap_clear(ccp->lsbmap, start, count);
                ccp->sb_avail = 1;
                mutex_unlock(&ccp->sb_mutex);
                wake_up_interruptible_all(&ccp->sb_queue);
        }
}

/* CCP version 5: Union to define the function field (cmd_reg1/dword0) */
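/* Each engine interprets the descriptor's function field differently;
 * the structs below are overlapping views of the same bits, and the
 * engine selected through CCP5_CMD_ENGINE() determines which view the
 * hardware applies.
 */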
union ccp_function {
        struct {
                u16 size:7;
                u16 encrypt:1;
                u16 mode:5;
                u16 type:2;
        } aes;
        struct {
                u16 size:7;
                u16 encrypt:1;
                u16 rsvd:5;
                u16 type:2;
        } aes_xts;
        struct {
                u16 size:7;
                u16 encrypt:1;
                u16 mode:5;
                u16 type:2;
        } des3;
        struct {
                u16 rsvd1:10;
                u16 type:4;
                u16 rsvd2:1;
        } sha;
        struct {
                u16 mode:3;
                u16 size:12;
        } rsa;
        struct {
                u16 byteswap:2;
                u16 bitwise:3;
                u16 reflect:2;
                u16 rsvd:8;
        } pt;
        struct  {
                u16 rsvd:13;
        } zlib;
        struct {
                u16 size:10;
                u16 type:2;
                u16 mode:3;
        } ecc;
        u16 raw;
};

#define CCP_AES_SIZE(p)         ((p)->aes.size)
#define CCP_AES_ENCRYPT(p)      ((p)->aes.encrypt)
#define CCP_AES_MODE(p)         ((p)->aes.mode)
#define CCP_AES_TYPE(p)         ((p)->aes.type)
#define CCP_XTS_SIZE(p)         ((p)->aes_xts.size)
#define CCP_XTS_TYPE(p)         ((p)->aes_xts.type)
#define CCP_XTS_ENCRYPT(p)      ((p)->aes_xts.encrypt)
#define CCP_DES3_SIZE(p)        ((p)->des3.size)
#define CCP_DES3_ENCRYPT(p)     ((p)->des3.encrypt)
#define CCP_DES3_MODE(p)        ((p)->des3.mode)
#define CCP_DES3_TYPE(p)        ((p)->des3.type)
#define CCP_SHA_TYPE(p)         ((p)->sha.type)
#define CCP_RSA_SIZE(p)         ((p)->rsa.size)
#define CCP_PT_BYTESWAP(p)      ((p)->pt.byteswap)
#define CCP_PT_BITWISE(p)       ((p)->pt.bitwise)
#define CCP_ECC_MODE(p)         ((p)->ecc.mode)
#define CCP_ECC_AFFINE(p)       ((p)->ecc.one)

/* Word 0 */
#define CCP5_CMD_DW0(p)         ((p)->dw0)
#define CCP5_CMD_SOC(p)         (CCP5_CMD_DW0(p).soc)
#define CCP5_CMD_IOC(p)         (CCP5_CMD_DW0(p).ioc)
#define CCP5_CMD_INIT(p)        (CCP5_CMD_DW0(p).init)
#define CCP5_CMD_EOM(p)         (CCP5_CMD_DW0(p).eom)
#define CCP5_CMD_FUNCTION(p)    (CCP5_CMD_DW0(p).function)
#define CCP5_CMD_ENGINE(p)      (CCP5_CMD_DW0(p).engine)
#define CCP5_CMD_PROT(p)        (CCP5_CMD_DW0(p).prot)

/* Word 1 */
#define CCP5_CMD_DW1(p)         ((p)->length)
#define CCP5_CMD_LEN(p)         (CCP5_CMD_DW1(p))

/* Word 2 */
#define CCP5_CMD_DW2(p)         ((p)->src_lo)
#define CCP5_CMD_SRC_LO(p)      (CCP5_CMD_DW2(p))

/* Word 3 */
#define CCP5_CMD_DW3(p)         ((p)->dw3)
#define CCP5_CMD_SRC_MEM(p)     ((p)->dw3.src_mem)
#define CCP5_CMD_SRC_HI(p)      ((p)->dw3.src_hi)
#define CCP5_CMD_LSB_ID(p)      ((p)->dw3.lsb_cxt_id)
#define CCP5_CMD_FIX_SRC(p)     ((p)->dw3.fixed)

/* Words 4/5 */
#define CCP5_CMD_DW4(p)         ((p)->dw4)
#define CCP5_CMD_DST_LO(p)      (CCP5_CMD_DW4(p).dst_lo)
#define CCP5_CMD_DW5(p)         ((p)->dw5.fields.dst_hi)
#define CCP5_CMD_DST_HI(p)      (CCP5_CMD_DW5(p))
#define CCP5_CMD_DST_MEM(p)     ((p)->dw5.fields.dst_mem)
#define CCP5_CMD_FIX_DST(p)     ((p)->dw5.fields.fixed)
#define CCP5_CMD_SHA_LO(p)      ((p)->dw4.sha_len_lo)
#define CCP5_CMD_SHA_HI(p)      ((p)->dw5.sha_len_hi)

/* Word 6/7 */
#define CCP5_CMD_DW6(p)         ((p)->key_lo)
#define CCP5_CMD_KEY_LO(p)      (CCP5_CMD_DW6(p))
#define CCP5_CMD_DW7(p)         ((p)->dw7)
#define CCP5_CMD_KEY_HI(p)      ((p)->dw7.key_hi)
#define CCP5_CMD_KEY_MEM(p)     ((p)->dw7.key_mem)

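/* Queue descriptor rings are addressed as a full 32-bit low word plus
 * a 16-bit high word (the mask below), which ccp5_init() folds into
 * the upper half of the queue control register.
 */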
static inline u32 low_address(unsigned long addr)
{
        return (u64)addr & 0x0ffffffff;
}

static inline u32 high_address(unsigned long addr)
{
        return ((u64)addr >> 32) & 0x00000ffff;
}

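/* Compute how many descriptor slots are free in the command ring: the
 * distance from the software tail (qidx) back to the hardware head,
 * modulo the ring size, minus the one slot that is always left empty
 * so a full ring can be told apart from an empty one.
 */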
static unsigned int ccp5_get_free_slots(struct ccp_cmd_queue *cmd_q)
{
        unsigned int head_idx, n;
        u32 head_lo, queue_start;

        queue_start = low_address(cmd_q->qdma_tail);
        head_lo = ioread32(cmd_q->reg_head_lo);
        head_idx = (head_lo - queue_start) / sizeof(struct ccp5_desc);

        n = head_idx + COMMANDS_PER_QUEUE - cmd_q->qidx - 1;

        return n % COMMANDS_PER_QUEUE; /* Always one unused spot */
}

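/* Submit one descriptor: copy it into the next ring slot, advance the
 * tail register so the engine sees it, and, if a completion interrupt
 * was requested, sleep until the interrupt handler wakes us and report
 * any command error that was latched for this queue.
 */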
static int ccp5_do_cmd(struct ccp5_desc *desc,
                       struct ccp_cmd_queue *cmd_q)
{
        u32 *mP;
        __le32 *dP;
        u32 tail;
        int     i;
        int ret = 0;

        cmd_q->total_ops++;

        if (CCP5_CMD_SOC(desc)) {
                CCP5_CMD_IOC(desc) = 1;
                CCP5_CMD_SOC(desc) = 0;
        }
        mutex_lock(&cmd_q->q_mutex);

        mP = (u32 *) &cmd_q->qbase[cmd_q->qidx];
        dP = (__le32 *) desc;
        for (i = 0; i < 8; i++)
                mP[i] = cpu_to_le32(dP[i]); /* handle endianness */

        cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;

        /* The data used by this command must be flushed to memory */
        wmb();

        /* Write the new tail address back to the queue register */
        tail = low_address(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);
        iowrite32(tail, cmd_q->reg_tail_lo);

        /* Turn the queue back on using our cached control register */
        iowrite32(cmd_q->qcontrol | CMD5_Q_RUN, cmd_q->reg_control);
        mutex_unlock(&cmd_q->q_mutex);

        if (CCP5_CMD_IOC(desc)) {
                /* Wait for the job to complete */
                ret = wait_event_interruptible(cmd_q->int_queue,
                                               cmd_q->int_rcvd);
                if (ret || cmd_q->cmd_error) {
                        /* Log the error and flush the queue by
                         * moving the head pointer
                         */
                        if (cmd_q->cmd_error)
                                ccp_log_error(cmd_q->ccp,
                                              cmd_q->cmd_error);
                        iowrite32(tail, cmd_q->reg_head_lo);
                        if (!ret)
                                ret = -EIO;
                }
                cmd_q->int_rcvd = 0;
        }

        return ret;
}

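/* The ccp5_perform_*() helpers below all follow the same pattern:
 * build a zeroed ccp5_desc on the stack, fill in the engine, function
 * and DMA/LSB addresses for the operation, then hand the descriptor to
 * ccp5_do_cmd(). Keys and context are referenced by their LSB slot
 * (slot number * LSB_ITEM_SIZE).
 */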
static int ccp5_perform_aes(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;
        u32 key_addr = op->sb_key * LSB_ITEM_SIZE;

        op->cmd_q->total_aes_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_AES;

        CCP5_CMD_SOC(&desc) = op->soc;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = op->init;
        CCP5_CMD_EOM(&desc) = op->eom;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_AES_ENCRYPT(&function) = op->u.aes.action;
        CCP_AES_MODE(&function) = op->u.aes.mode;
        CCP_AES_TYPE(&function) = op->u.aes.type;
        CCP_AES_SIZE(&function) = op->u.aes.size;

        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
        CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
        CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
        CCP5_CMD_KEY_HI(&desc) = 0;
        CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
        CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;

        return ccp5_do_cmd(&desc, op->cmd_q);
}

static int ccp5_perform_xts_aes(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;
        u32 key_addr = op->sb_key * LSB_ITEM_SIZE;

        op->cmd_q->total_xts_aes_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_XTS_AES_128;

        CCP5_CMD_SOC(&desc) = op->soc;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = op->init;
        CCP5_CMD_EOM(&desc) = op->eom;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_XTS_TYPE(&function) = op->u.xts.type;
        CCP_XTS_ENCRYPT(&function) = op->u.xts.action;
        CCP_XTS_SIZE(&function) = op->u.xts.unit_size;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
        CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
        CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
        CCP5_CMD_KEY_HI(&desc) =  0;
        CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
        CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;

        return ccp5_do_cmd(&desc, op->cmd_q);
}

static int ccp5_perform_sha(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;

        op->cmd_q->total_sha_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SHA;

        CCP5_CMD_SOC(&desc) = op->soc;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = 1;
        CCP5_CMD_EOM(&desc) = op->eom;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_SHA_TYPE(&function) = op->u.sha.type;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;

        if (op->eom) {
                CCP5_CMD_SHA_LO(&desc) = lower_32_bits(op->u.sha.msg_bits);
                CCP5_CMD_SHA_HI(&desc) = upper_32_bits(op->u.sha.msg_bits);
        } else {
                CCP5_CMD_SHA_LO(&desc) = 0;
                CCP5_CMD_SHA_HI(&desc) = 0;
        }

        return ccp5_do_cmd(&desc, op->cmd_q);
}

static int ccp5_perform_des3(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;
        u32 key_addr = op->sb_key * LSB_ITEM_SIZE;

        op->cmd_q->total_3des_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, sizeof(struct ccp5_desc));

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_DES3;

        CCP5_CMD_SOC(&desc) = op->soc;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = op->init;
        CCP5_CMD_EOM(&desc) = op->eom;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_DES3_ENCRYPT(&function) = op->u.des3.action;
        CCP_DES3_MODE(&function) = op->u.des3.mode;
        CCP_DES3_TYPE(&function) = op->u.des3.type;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
        CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
        CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
        CCP5_CMD_KEY_HI(&desc) = 0;
        CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
        CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;

        return ccp5_do_cmd(&desc, op->cmd_q);
}

static int ccp5_perform_rsa(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;

        op->cmd_q->total_rsa_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_RSA;

        CCP5_CMD_SOC(&desc) = op->soc;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = 0;
        CCP5_CMD_EOM(&desc) = 1;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_RSA_SIZE(&function) = (op->u.rsa.mod_size + 7) >> 3;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->u.rsa.input_len;

        /* Source is from external memory */
        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        /* Destination is in external memory */
        CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
        CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
        CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        /* Key (Exponent) is in external memory */
        CCP5_CMD_KEY_LO(&desc) = ccp_addr_lo(&op->exp.u.dma);
        CCP5_CMD_KEY_HI(&desc) = ccp_addr_hi(&op->exp.u.dma);
        CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        return ccp5_do_cmd(&desc, op->cmd_q);
}

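/* Pass-through moves data without transforming it: either between two
 * system-memory buffers or between system memory and LSB slots, with
 * optional byte-swapping or bitwise masking applied on the way through.
 */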
static int ccp5_perform_passthru(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;
        struct ccp_dma_info *saddr = &op->src.u.dma;
        struct ccp_dma_info *daddr = &op->dst.u.dma;


        op->cmd_q->total_pt_ops++;

        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_PASSTHRU;

        CCP5_CMD_SOC(&desc) = 0;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = 0;
        CCP5_CMD_EOM(&desc) = op->eom;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_PT_BYTESWAP(&function) = op->u.passthru.byte_swap;
        CCP_PT_BITWISE(&function) = op->u.passthru.bit_mod;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        /* The LSB side of a pass-through carries no DMA length, so take
         * the length from whichever operand lives in system memory.
         */
        if (op->src.type == CCP_MEMTYPE_SYSTEM)
                CCP5_CMD_LEN(&desc) = saddr->length;
        else
                CCP5_CMD_LEN(&desc) = daddr->length;

        if (op->src.type == CCP_MEMTYPE_SYSTEM) {
                CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
                CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
                CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

                if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
                        CCP5_CMD_LSB_ID(&desc) = op->sb_key;
        } else {
                u32 key_addr = op->src.u.sb * CCP_SB_BYTES;

                CCP5_CMD_SRC_LO(&desc) = lower_32_bits(key_addr);
                CCP5_CMD_SRC_HI(&desc) = 0;
                CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SB;
        }

        if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
                CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
                CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
                CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
        } else {
                u32 key_addr = op->dst.u.sb * CCP_SB_BYTES;

                CCP5_CMD_DST_LO(&desc) = lower_32_bits(key_addr);
                CCP5_CMD_DST_HI(&desc) = 0;
                CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SB;
        }

        return ccp5_do_cmd(&desc, op->cmd_q);
}

static int ccp5_perform_ecc(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;

        op->cmd_q->total_ecc_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_ECC;

        CCP5_CMD_SOC(&desc) = 0;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = 0;
        CCP5_CMD_EOM(&desc) = 1;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        function.ecc.mode = op->u.ecc.function;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
        CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
        CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        return ccp5_do_cmd(&desc, op->cmd_q);
}

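/* 'status' is the concatenated LSB access mask read back from the mask
 * registers: LSB_REGION_WIDTH bits per region, one bit per queue.
 * Record which regions this queue is allowed to use.
 */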
static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status)
{
        int q_mask = 1 << cmd_q->id;
        int queues = 0;
        int j;

        /* Build a bit mask to know which LSBs this queue has access to.
         * Don't bother with segment 0 as it has special privileges.
         */
        for (j = 1; j < MAX_LSB_CNT; j++) {
                if (status & q_mask)
                        bitmap_set(cmd_q->lsbmask, j, 1);
                status >>= LSB_REGION_WIDTH;
        }
        queues = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT);
        dev_dbg(cmd_q->ccp->dev, "Queue %d can access %d LSB regions\n",
                 cmd_q->id, queues);

        return queues ? 0 : -EINVAL;
}

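/* Try to give a private LSB to every queue whose number of reachable
 * regions equals 'lsb_cnt'. Returns the number of public LSBs still
 * unassigned, or -EINVAL if such a queue cannot be matched to a region.
 */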
static int ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
                                        int lsb_cnt, int n_lsbs,
                                        unsigned long *lsb_pub)
{
        DECLARE_BITMAP(qlsb, MAX_LSB_CNT);
        int bitno;
        int qlsb_wgt;
        int i;

        /* For each queue:
         * If the count of potential LSBs available to a queue matches the
         * ordinal given to us in lsb_cnt:
         * Copy the mask of possible LSBs for this queue into "qlsb";
         * For each bit in qlsb, see if the corresponding bit in the
         * aggregation mask is set; if so, we have a match.
         *     If we have a match, clear the bit in the aggregation to
         *     mark it as no longer available.
         *     If there is no match, clear the bit in qlsb and keep looking.
         */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];

                qlsb_wgt = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT);

                if (qlsb_wgt == lsb_cnt) {
                        bitmap_copy(qlsb, cmd_q->lsbmask, MAX_LSB_CNT);

                        bitno = find_first_bit(qlsb, MAX_LSB_CNT);
                        while (bitno < MAX_LSB_CNT) {
                                if (test_bit(bitno, lsb_pub)) {
                                        /* We found an available LSB
                                         * that this queue can access
                                         */
                                        cmd_q->lsb = bitno;
                                        bitmap_clear(lsb_pub, bitno, 1);
                                        dev_dbg(ccp->dev,
                                                 "Queue %d gets LSB %d\n",
                                                 i, bitno);
                                        break;
                                }
                                bitmap_clear(qlsb, bitno, 1);
                                bitno = find_first_bit(qlsb, MAX_LSB_CNT);
                        }
                        if (bitno >= MAX_LSB_CNT)
                                return -EINVAL;
                        n_lsbs--;
                }
        }
        return n_lsbs;
}

/* For each queue, from the most- to least-constrained:
 * find an LSB that can be assigned to the queue. If there are N queues that
 * can only use M LSBs, where N > M, fail; otherwise, every queue will get a
 * dedicated LSB. Remaining LSB regions become a shared resource.
 * If we have fewer LSBs than queues, all LSB regions become shared resources.
 */
static int ccp_assign_lsbs(struct ccp_device *ccp)
{
        DECLARE_BITMAP(lsb_pub, MAX_LSB_CNT);
        DECLARE_BITMAP(qlsb, MAX_LSB_CNT);
        int n_lsbs = 0;
        int bitno;
        int i, lsb_cnt;
        int rc = 0;

        bitmap_zero(lsb_pub, MAX_LSB_CNT);

        /* Create an aggregate bitmap to get a total count of available LSBs */
        for (i = 0; i < ccp->cmd_q_count; i++)
                bitmap_or(lsb_pub,
                          lsb_pub, ccp->cmd_q[i].lsbmask,
                          MAX_LSB_CNT);

        n_lsbs = bitmap_weight(lsb_pub, MAX_LSB_CNT);

        if (n_lsbs >= ccp->cmd_q_count) {
                /* We have enough LSBS to give every queue a private LSB.
                 * Brute force search to start with the queues that are more
                 * constrained in LSB choice. When an LSB is privately
                 * assigned, it is removed from the public mask.
                 * This is an ugly N squared algorithm with some optimization.
                 */
                for (lsb_cnt = 1;
                     n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
                     lsb_cnt++) {
                        rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs,
                                                          lsb_pub);
                        if (rc < 0)
                                return -EINVAL;
                        n_lsbs = rc;
                }
        }

        rc = 0;
        /* What's left of the LSBs, according to the public mask, now become
         * shared. Any zero bits in the lsb_pub mask represent an LSB region
         * that can't be used as a shared resource, so mark the LSB slots for
         * them as "in use".
         */
        bitmap_copy(qlsb, lsb_pub, MAX_LSB_CNT);

        bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT);
        while (bitno < MAX_LSB_CNT) {
                bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
                bitmap_set(qlsb, bitno, 1);
                bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT);
        }

        return rc;
}

static void ccp5_disable_queue_interrupts(struct ccp_device *ccp)
{
        unsigned int i;

        for (i = 0; i < ccp->cmd_q_count; i++)
                iowrite32(0x0, ccp->cmd_q[i].reg_int_enable);
}

static void ccp5_enable_queue_interrupts(struct ccp_device *ccp)
{
        unsigned int i;

        for (i = 0; i < ccp->cmd_q_count; i++)
                iowrite32(SUPPORTED_INTERRUPTS, ccp->cmd_q[i].reg_int_enable);
}

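/* Bottom half of interrupt handling: the hard IRQ handler masks the
 * queue interrupts and either schedules this tasklet or calls it
 * directly. For every queue with a pending status we latch the first
 * error seen, acknowledge the interrupt, wake any waiter in
 * ccp5_do_cmd(), and finally re-enable the queue interrupts.
 */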
static void ccp5_irq_bh(unsigned long data)
{
        struct ccp_device *ccp = (struct ccp_device *)data;
        u32 status;
        unsigned int i;

        for (i = 0; i < ccp->cmd_q_count; i++) {
                struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];

                status = ioread32(cmd_q->reg_interrupt_status);

                if (status) {
                        cmd_q->int_status = status;
                        cmd_q->q_status = ioread32(cmd_q->reg_status);
                        cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);

                        /* On error, only save the first error value */
                        if ((status & INT_ERROR) && !cmd_q->cmd_error)
                                cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);

                        cmd_q->int_rcvd = 1;

                        /* Acknowledge the interrupt and wake the kthread */
                        iowrite32(status, cmd_q->reg_interrupt_status);
                        wake_up_interruptible(&cmd_q->int_queue);
                }
        }
        ccp5_enable_queue_interrupts(ccp);
}

static irqreturn_t ccp5_irq_handler(int irq, void *data)
{
        struct ccp_device *ccp = (struct ccp_device *)data;

        ccp5_disable_queue_interrupts(ccp);
        ccp->total_interrupts++;
        if (ccp->use_tasklet)
                tasklet_schedule(&ccp->irq_tasklet);
        else
                ccp5_irq_bh((unsigned long)ccp);
        return IRQ_HANDLED;
}

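/* Bring-up sequence for a v5 CCP: discover the usable queues from
 * Q_MASK_REG, give each one a DMA pool and a coherent descriptor ring,
 * map its per-queue register block, then hook the IRQ, publish the LSB
 * masks, assign private/shared LSB regions, start one kthread per
 * queue and register the RNG and DMA-engine services.
 */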
static int ccp5_init(struct ccp_device *ccp)
{
        struct device *dev = ccp->dev;
        struct ccp_cmd_queue *cmd_q;
        struct dma_pool *dma_pool;
        char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
        unsigned int qmr, i;
        u64 status;
        u32 status_lo, status_hi;
        int ret;

        /* Find available queues */
        qmr = ioread32(ccp->io_regs + Q_MASK_REG);
        for (i = 0; i < MAX_HW_QUEUES; i++) {

                if (!(qmr & (1 << i)))
                        continue;

                /* Allocate a dma pool for this queue */
                snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d",
                         ccp->name, i);
                dma_pool = dma_pool_create(dma_pool_name, dev,
                                           CCP_DMAPOOL_MAX_SIZE,
                                           CCP_DMAPOOL_ALIGN, 0);
                if (!dma_pool) {
                        dev_err(dev, "unable to allocate dma pool\n");
                        ret = -ENOMEM;
                        goto e_pool;
                }

                cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
                ccp->cmd_q_count++;

                cmd_q->ccp = ccp;
                cmd_q->id = i;
                cmd_q->dma_pool = dma_pool;
                mutex_init(&cmd_q->q_mutex);

                /* Page alignment satisfies our needs for N <= 128 */
                BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128);
                cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
                cmd_q->qbase = dma_alloc_coherent(dev, cmd_q->qsize,
                                                  &cmd_q->qbase_dma,
                                                  GFP_KERNEL);
                if (!cmd_q->qbase) {
                        dev_err(dev, "unable to allocate command queue\n");
                        ret = -ENOMEM;
                        goto e_pool;
                }

                cmd_q->qidx = 0;
                /* Preset some register values and masks that are queue
                 * number dependent
                 */
                cmd_q->reg_control = ccp->io_regs +
                                     CMD5_Q_STATUS_INCR * (i + 1);
                cmd_q->reg_tail_lo = cmd_q->reg_control + CMD5_Q_TAIL_LO_BASE;
                cmd_q->reg_head_lo = cmd_q->reg_control + CMD5_Q_HEAD_LO_BASE;
                cmd_q->reg_int_enable = cmd_q->reg_control +
                                        CMD5_Q_INT_ENABLE_BASE;
                cmd_q->reg_interrupt_status = cmd_q->reg_control +
                                              CMD5_Q_INTERRUPT_STATUS_BASE;
                cmd_q->reg_status = cmd_q->reg_control + CMD5_Q_STATUS_BASE;
                cmd_q->reg_int_status = cmd_q->reg_control +
                                        CMD5_Q_INT_STATUS_BASE;
                cmd_q->reg_dma_status = cmd_q->reg_control +
                                        CMD5_Q_DMA_STATUS_BASE;
                cmd_q->reg_dma_read_status = cmd_q->reg_control +
                                             CMD5_Q_DMA_READ_STATUS_BASE;
                cmd_q->reg_dma_write_status = cmd_q->reg_control +
                                              CMD5_Q_DMA_WRITE_STATUS_BASE;

                init_waitqueue_head(&cmd_q->int_queue);

                dev_dbg(dev, "queue #%u available\n", i);
        }

        if (ccp->cmd_q_count == 0) {
                dev_notice(dev, "no command queues available\n");
                ret = -EIO;
                goto e_pool;
        }

        /* Turn off the queues and disable interrupts until ready */
        ccp5_disable_queue_interrupts(ccp);
        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                cmd_q->qcontrol = 0; /* Start with nothing */
                iowrite32(cmd_q->qcontrol, cmd_q->reg_control);

                ioread32(cmd_q->reg_int_status);
                ioread32(cmd_q->reg_status);

                /* Clear the interrupt status */
                iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
        }

        dev_dbg(dev, "Requesting an IRQ...\n");
        /* Request an irq */
        ret = sp_request_ccp_irq(ccp->sp, ccp5_irq_handler, ccp->name, ccp);
        if (ret) {
                dev_err(dev, "unable to allocate an IRQ\n");
                goto e_pool;
        }
        /* Initialize the ISR tasklet */
        if (ccp->use_tasklet)
                tasklet_init(&ccp->irq_tasklet, ccp5_irq_bh,
                             (unsigned long)ccp);

        dev_dbg(dev, "Loading LSB map...\n");
        /* Copy the private LSB mask to the public registers */
        status_lo = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
        status_hi = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET);
        iowrite32(status_lo, ccp->io_regs + LSB_PUBLIC_MASK_LO_OFFSET);
        iowrite32(status_hi, ccp->io_regs + LSB_PUBLIC_MASK_HI_OFFSET);
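        /* The LO mask register only carries 30 valid bits (cf. the
         * 0x3FFFFFFF private mask written in ccp5other_config()), so the
         * HI register's bits are spliced in starting at bit 30.
         */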
        status = ((u64)status_hi<<30) | (u64)status_lo;

        dev_dbg(dev, "Configuring virtual queues...\n");
        /* Configure size of each virtual queue accessible to host */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                u32 dma_addr_lo;
                u32 dma_addr_hi;

                cmd_q = &ccp->cmd_q[i];

                cmd_q->qcontrol &= ~(CMD5_Q_SIZE << CMD5_Q_SHIFT);
                cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD5_Q_SHIFT;

                cmd_q->qdma_tail = cmd_q->qbase_dma;
                dma_addr_lo = low_address(cmd_q->qdma_tail);
                iowrite32((u32)dma_addr_lo, cmd_q->reg_tail_lo);
                iowrite32((u32)dma_addr_lo, cmd_q->reg_head_lo);

                dma_addr_hi = high_address(cmd_q->qdma_tail);
                cmd_q->qcontrol |= (dma_addr_hi << 16);
                iowrite32(cmd_q->qcontrol, cmd_q->reg_control);

                /* Find the LSB regions accessible to the queue */
                ccp_find_lsb_regions(cmd_q, status);
                cmd_q->lsb = -1; /* Unassigned value */
        }

        dev_dbg(dev, "Assigning LSBs...\n");
        ret = ccp_assign_lsbs(ccp);
        if (ret) {
                dev_err(dev, "Unable to assign LSBs (%d)\n", ret);
                goto e_irq;
        }

        /* Optimization: pre-allocate LSB slots for each queue */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                ccp->cmd_q[i].sb_key = ccp_lsb_alloc(&ccp->cmd_q[i], 2);
                ccp->cmd_q[i].sb_ctx = ccp_lsb_alloc(&ccp->cmd_q[i], 2);
        }

        dev_dbg(dev, "Starting threads...\n");
        /* Create a kthread for each queue */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                struct task_struct *kthread;

                cmd_q = &ccp->cmd_q[i];

                kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
                                         "%s-q%u", ccp->name, cmd_q->id);
                if (IS_ERR(kthread)) {
                        dev_err(dev, "error creating queue thread (%ld)\n",
                                PTR_ERR(kthread));
                        ret = PTR_ERR(kthread);
                        goto e_kthread;
                }

                cmd_q->kthread = kthread;
                wake_up_process(kthread);
        }

        dev_dbg(dev, "Enabling interrupts...\n");
        ccp5_enable_queue_interrupts(ccp);

        dev_dbg(dev, "Registering device...\n");
        /* Put this on the unit list to make it available */
        ccp_add_device(ccp);

        ret = ccp_register_rng(ccp);
        if (ret)
                goto e_kthread;

        /* Register the DMA engine support */
        ret = ccp_dmaengine_register(ccp);
        if (ret)
                goto e_hwrng;

        /* Set up debugfs entries */
        ccp5_debugfs_setup(ccp);

        return 0;

e_hwrng:
        ccp_unregister_rng(ccp);

e_kthread:
        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].kthread)
                        kthread_stop(ccp->cmd_q[i].kthread);

e_irq:
        sp_free_ccp_irq(ccp->sp, ccp);

e_pool:
        for (i = 0; i < ccp->cmd_q_count; i++)
                dma_pool_destroy(ccp->cmd_q[i].dma_pool);

        return ret;
}

static void ccp5_destroy(struct ccp_device *ccp)
{
        struct device *dev = ccp->dev;
        struct ccp_cmd_queue *cmd_q;
        struct ccp_cmd *cmd;
        unsigned int i;

        /* Unregister the DMA engine */
        ccp_dmaengine_unregister(ccp);

        /* Unregister the RNG */
        ccp_unregister_rng(ccp);

        /* Remove this device from the list of available units first */
        ccp_del_device(ccp);

        /* We're in the process of tearing down the entire driver;
         * when all the devices are gone clean up debugfs
         */
        if (ccp_present())
                ccp5_debugfs_destroy();

        /* Disable and clear interrupts */
        ccp5_disable_queue_interrupts(ccp);
        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                /* Turn off the run bit */
                iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control);

                /* Clear the interrupt status */
                iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
                ioread32(cmd_q->reg_int_status);
                ioread32(cmd_q->reg_status);
        }

        /* Stop the queue kthreads */
        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].kthread)
                        kthread_stop(ccp->cmd_q[i].kthread);

        sp_free_ccp_irq(ccp->sp, ccp);

        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];
                dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase,
                                  cmd_q->qbase_dma);
        }

        /* Flush the cmd and backlog queue */
        while (!list_empty(&ccp->cmd)) {
                /* Invoke the callback directly with an error code */
                cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
                list_del(&cmd->entry);
                cmd->callback(cmd->data, -ENODEV);
        }
        while (!list_empty(&ccp->backlog)) {
                /* Invoke the callback directly with an error code */
                cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
                list_del(&cmd->entry);
                cmd->callback(cmd->data, -ENODEV);
        }
}

static void ccp5_config(struct ccp_device *ccp)
{
        /* Public side */
        iowrite32(0x0, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
}

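/* Extra setup used by ccpv5b (the "NTB CCP", where the driver owns all
 * of the queues): program the TRNG and seed the AES mask from it, set
 * the queue mask/priority/timeout registers and the private LSB masks,
 * and configure clock gating before the common ccp5_config() step. The
 * numeric values are hardware-defined configuration constants.
 */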
static void ccp5other_config(struct ccp_device *ccp)
{
        int i;
        u32 rnd;

        /* We own all of the queues on the NTB CCP */

        iowrite32(0x00012D57, ccp->io_regs + CMD5_TRNG_CTL_OFFSET);
        iowrite32(0x00000003, ccp->io_regs + CMD5_CONFIG_0_OFFSET);
        for (i = 0; i < 12; i++) {
                rnd = ioread32(ccp->io_regs + TRNG_OUT_REG);
                iowrite32(rnd, ccp->io_regs + CMD5_AES_MASK_OFFSET);
        }

        iowrite32(0x0000001F, ccp->io_regs + CMD5_QUEUE_MASK_OFFSET);
        iowrite32(0x00005B6D, ccp->io_regs + CMD5_QUEUE_PRIO_OFFSET);
        iowrite32(0x00000000, ccp->io_regs + CMD5_CMD_TIMEOUT_OFFSET);

        iowrite32(0x3FFFFFFF, ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
        iowrite32(0x000003FF, ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET);

        iowrite32(0x00108823, ccp->io_regs + CMD5_CLK_GATE_CTL_OFFSET);

        ccp5_config(ccp);
}

/* Version 5 adds some function, but is essentially the same as v3 */
static const struct ccp_actions ccp5_actions = {
        .aes = ccp5_perform_aes,
        .xts_aes = ccp5_perform_xts_aes,
        .sha = ccp5_perform_sha,
        .des3 = ccp5_perform_des3,
        .rsa = ccp5_perform_rsa,
        .passthru = ccp5_perform_passthru,
        .ecc = ccp5_perform_ecc,
        .sballoc = ccp_lsb_alloc,
        .sbfree = ccp_lsb_free,
        .init = ccp5_init,
        .destroy = ccp5_destroy,
        .get_free_slots = ccp5_get_free_slots,
};

const struct ccp_vdata ccpv5a = {
        .version = CCP_VERSION(5, 0),
        .setup = ccp5_config,
        .perform = &ccp5_actions,
        .offset = 0x0,
        .rsamax = CCP5_RSA_MAX_WIDTH,
};

const struct ccp_vdata ccpv5b = {
        .version = CCP_VERSION(5, 0),
        .dma_chan_attr = DMA_PRIVATE,
        .setup = ccp5other_config,
        .perform = &ccp5_actions,
        .offset = 0x0,
        .rsamax = CCP5_RSA_MAX_WIDTH,
};