drivers/crypto/ccp/ccp-dev-v3.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

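/*
 * Allocate a contiguous run of 'count' slots from the device's key
 * storage block (KSB) bitmap.  If no run is free, sleep until another
 * queue releases slots.  Returns the first allocated KSB index, or 0
 * if the wait was interrupted by a signal.
 */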
static u32 ccp_alloc_ksb(struct ccp_cmd_queue *cmd_q, unsigned int count)
{
        int start;
        struct ccp_device *ccp = cmd_q->ccp;

        for (;;) {
                mutex_lock(&ccp->sb_mutex);

                start = (u32)bitmap_find_next_zero_area(ccp->sb,
                                                        ccp->sb_count,
                                                        ccp->sb_start,
                                                        count, 0);
                if (start <= ccp->sb_count) {
                        bitmap_set(ccp->sb, start, count);

                        mutex_unlock(&ccp->sb_mutex);
                        break;
                }

                ccp->sb_avail = 0;

                mutex_unlock(&ccp->sb_mutex);

                /* Wait for KSB entries to become available */
                if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail))
                        return 0;
        }

        return KSB_START + start;
}

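/*
 * Return previously allocated KSB slots to the bitmap and wake any
 * allocators waiting for free slots.  A 'start' of 0 means nothing
 * was allocated, so there is nothing to free.
 */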
static void ccp_free_ksb(struct ccp_cmd_queue *cmd_q, unsigned int start,
                         unsigned int count)
{
        struct ccp_device *ccp = cmd_q->ccp;

        if (!start)
                return;

        mutex_lock(&ccp->sb_mutex);

        bitmap_clear(ccp->sb, start - KSB_START, count);

        ccp->sb_avail = 1;

        mutex_unlock(&ccp->sb_mutex);

        wake_up_interruptible_all(&ccp->sb_queue);
}

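/* Extract the free command slot count from a queue's status register */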
static unsigned int ccp_get_free_slots(struct ccp_cmd_queue *cmd_q)
{
        return CMD_Q_DEPTH(ioread32(cmd_q->reg_status));
}

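/*
 * Submit one command: write the REQ1-REQx values under the request
 * mutex, then write REQ0 to start the job.  When an interrupt on
 * completion was requested, wait for it and, on error or a
 * stop-on-completion request, delete the affected job(s) from the
 * queue before returning.
 */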
static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count)
{
        struct ccp_cmd_queue *cmd_q = op->cmd_q;
        struct ccp_device *ccp = cmd_q->ccp;
        void __iomem *cr_addr;
        u32 cr0, cmd;
        unsigned int i;
        int ret = 0;

        /* We could read a status register to see how many free slots
         * are actually available, but reading that register resets it
         * and we could lose some error information.
         */
        cmd_q->free_slots--;

        cr0 = (cmd_q->id << REQ0_CMD_Q_SHIFT)
              | (op->jobid << REQ0_JOBID_SHIFT)
              | REQ0_WAIT_FOR_WRITE;

        if (op->soc)
                cr0 |= REQ0_STOP_ON_COMPLETE
                       | REQ0_INT_ON_COMPLETE;

        if (op->ioc || !cmd_q->free_slots)
                cr0 |= REQ0_INT_ON_COMPLETE;

        /* Start at CMD_REQ1 */
        cr_addr = ccp->io_regs + CMD_REQ0 + CMD_REQ_INCR;

        mutex_lock(&ccp->req_mutex);

        /* Write CMD_REQ1 through CMD_REQx first */
        for (i = 0; i < cr_count; i++, cr_addr += CMD_REQ_INCR)
                iowrite32(*(cr + i), cr_addr);

        /* Tell the CCP to start */
        wmb();
        iowrite32(cr0, ccp->io_regs + CMD_REQ0);

        mutex_unlock(&ccp->req_mutex);

        if (cr0 & REQ0_INT_ON_COMPLETE) {
                /* Wait for the job to complete */
                ret = wait_event_interruptible(cmd_q->int_queue,
                                               cmd_q->int_rcvd);
                if (ret || cmd_q->cmd_error) {
                        /* On error delete all related jobs from the queue */
                        cmd = (cmd_q->id << DEL_Q_ID_SHIFT)
                              | op->jobid;
                        if (cmd_q->cmd_error)
                                ccp_log_error(cmd_q->ccp,
                                              cmd_q->cmd_error);

                        iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);

                        if (!ret)
                                ret = -EIO;
                } else if (op->soc) {
                        /* Delete just the head job from the queue on SoC */
                        cmd = DEL_Q_ACTIVE
                              | (cmd_q->id << DEL_Q_ID_SHIFT)
                              | op->jobid;

                        iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
                }

                cmd_q->free_slots = CMD_Q_DEPTH(cmd_q->q_status);

                cmd_q->int_rcvd = 0;
        }

        return ret;
}

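/*
 * Each ccp_perform_*() helper below packs one engine-specific command
 * into the six REQ1-REQ6 register values consumed by ccp_do_cmd().
 */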
static int ccp_perform_aes(struct ccp_op *op)
{
        u32 cr[6];

        /* Fill out the register contents for REQ1 through REQ6 */
        cr[0] = (CCP_ENGINE_AES << REQ1_ENGINE_SHIFT)
                | (op->u.aes.type << REQ1_AES_TYPE_SHIFT)
                | (op->u.aes.mode << REQ1_AES_MODE_SHIFT)
                | (op->u.aes.action << REQ1_AES_ACTION_SHIFT)
                | (op->sb_key << REQ1_KEY_KSB_SHIFT);
        cr[1] = op->src.u.dma.length - 1;
        cr[2] = ccp_addr_lo(&op->src.u.dma);
        cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
                | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
                | ccp_addr_hi(&op->src.u.dma);
        cr[4] = ccp_addr_lo(&op->dst.u.dma);
        cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
                | ccp_addr_hi(&op->dst.u.dma);

        if (op->u.aes.mode == CCP_AES_MODE_CFB)
                cr[0] |= ((0x7f) << REQ1_AES_CFB_SIZE_SHIFT);

        if (op->eom)
                cr[0] |= REQ1_EOM;

        if (op->init)
                cr[0] |= REQ1_INIT;

        return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

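/* Build a command for the XTS-AES-128 engine */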
static int ccp_perform_xts_aes(struct ccp_op *op)
{
        u32 cr[6];

        /* Fill out the register contents for REQ1 through REQ6 */
        cr[0] = (CCP_ENGINE_XTS_AES_128 << REQ1_ENGINE_SHIFT)
                | (op->u.xts.action << REQ1_AES_ACTION_SHIFT)
                | (op->u.xts.unit_size << REQ1_XTS_AES_SIZE_SHIFT)
                | (op->sb_key << REQ1_KEY_KSB_SHIFT);
        cr[1] = op->src.u.dma.length - 1;
        cr[2] = ccp_addr_lo(&op->src.u.dma);
        cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
                | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
                | ccp_addr_hi(&op->src.u.dma);
        cr[4] = ccp_addr_lo(&op->dst.u.dma);
        cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
                | ccp_addr_hi(&op->dst.u.dma);

        if (op->eom)
                cr[0] |= REQ1_EOM;

        if (op->init)
                cr[0] |= REQ1_INIT;

        return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

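/*
 * Build a command for the SHA engine.  On the final block (EOM) the
 * total message length in bits is passed in REQ5/REQ6.
 */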
static int ccp_perform_sha(struct ccp_op *op)
{
        u32 cr[6];

        /* Fill out the register contents for REQ1 through REQ6 */
        cr[0] = (CCP_ENGINE_SHA << REQ1_ENGINE_SHIFT)
                | (op->u.sha.type << REQ1_SHA_TYPE_SHIFT)
                | REQ1_INIT;
        cr[1] = op->src.u.dma.length - 1;
        cr[2] = ccp_addr_lo(&op->src.u.dma);
        cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
                | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
                | ccp_addr_hi(&op->src.u.dma);

        if (op->eom) {
                cr[0] |= REQ1_EOM;
                cr[4] = lower_32_bits(op->u.sha.msg_bits);
                cr[5] = upper_32_bits(op->u.sha.msg_bits);
        } else {
                cr[4] = 0;
                cr[5] = 0;
        }

        return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

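/*
 * Build a command for the RSA engine.  RSA operations always run as a
 * single pass, so EOM is set unconditionally.
 */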
static int ccp_perform_rsa(struct ccp_op *op)
{
        u32 cr[6];

        /* Fill out the register contents for REQ1 through REQ6 */
        cr[0] = (CCP_ENGINE_RSA << REQ1_ENGINE_SHIFT)
                | (op->u.rsa.mod_size << REQ1_RSA_MOD_SIZE_SHIFT)
                | (op->sb_key << REQ1_KEY_KSB_SHIFT)
                | REQ1_EOM;
        cr[1] = op->u.rsa.input_len - 1;
        cr[2] = ccp_addr_lo(&op->src.u.dma);
        cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
                | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
                | ccp_addr_hi(&op->src.u.dma);
        cr[4] = ccp_addr_lo(&op->dst.u.dma);
        cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
                | ccp_addr_hi(&op->dst.u.dma);

        return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

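/*
 * Build a command for the passthru engine, which moves data between
 * system memory and the key storage block, optionally applying a
 * bitwise operation and/or byte swapping on the way through.
 */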
static int ccp_perform_passthru(struct ccp_op *op)
{
        u32 cr[6];

        /* Fill out the register contents for REQ1 through REQ6 */
        cr[0] = (CCP_ENGINE_PASSTHRU << REQ1_ENGINE_SHIFT)
                | (op->u.passthru.bit_mod << REQ1_PT_BW_SHIFT)
                | (op->u.passthru.byte_swap << REQ1_PT_BS_SHIFT);

        if (op->src.type == CCP_MEMTYPE_SYSTEM)
                cr[1] = op->src.u.dma.length - 1;
        else
                cr[1] = op->dst.u.dma.length - 1;

        if (op->src.type == CCP_MEMTYPE_SYSTEM) {
                cr[2] = ccp_addr_lo(&op->src.u.dma);
                cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
                        | ccp_addr_hi(&op->src.u.dma);

                if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
                        cr[3] |= (op->sb_key << REQ4_KSB_SHIFT);
        } else {
                cr[2] = op->src.u.sb * CCP_SB_BYTES;
                cr[3] = (CCP_MEMTYPE_SB << REQ4_MEMTYPE_SHIFT);
        }

        if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
                cr[4] = ccp_addr_lo(&op->dst.u.dma);
                cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
                        | ccp_addr_hi(&op->dst.u.dma);
        } else {
                cr[4] = op->dst.u.sb * CCP_SB_BYTES;
                cr[5] = (CCP_MEMTYPE_SB << REQ6_MEMTYPE_SHIFT);
        }

        if (op->eom)
                cr[0] |= REQ1_EOM;

        return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

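/*
 * Build a command for the ECC engine, requesting conversion of the
 * result back to affine coordinates.
 */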
static int ccp_perform_ecc(struct ccp_op *op)
{
        u32 cr[6];

        /* Fill out the register contents for REQ1 through REQ6 */
        cr[0] = REQ1_ECC_AFFINE_CONVERT
                | (CCP_ENGINE_ECC << REQ1_ENGINE_SHIFT)
                | (op->u.ecc.function << REQ1_ECC_FUNCTION_SHIFT)
                | REQ1_EOM;
        cr[1] = op->src.u.dma.length - 1;
        cr[2] = ccp_addr_lo(&op->src.u.dma);
        cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
                | ccp_addr_hi(&op->src.u.dma);
        cr[4] = ccp_addr_lo(&op->dst.u.dma);
        cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
                | ccp_addr_hi(&op->dst.u.dma);

        return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

static void ccp_disable_queue_interrupts(struct ccp_device *ccp)
{
        iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
}

static void ccp_enable_queue_interrupts(struct ccp_device *ccp)
{
        iowrite32(ccp->qim, ccp->io_regs + IRQ_MASK_REG);
}

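/*
 * Interrupt bottom half: scan every queue for a completion or error
 * indication, latch the first error seen, acknowledge the interrupt
 * and wake the queue's waiter, then re-enable queue interrupts.
 */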
static void ccp_irq_bh(unsigned long data)
{
        struct ccp_device *ccp = (struct ccp_device *)data;
        struct ccp_cmd_queue *cmd_q;
        u32 q_int, status;
        unsigned int i;

        status = ioread32(ccp->io_regs + IRQ_STATUS_REG);

        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                q_int = status & (cmd_q->int_ok | cmd_q->int_err);
                if (q_int) {
                        cmd_q->int_status = status;
                        cmd_q->q_status = ioread32(cmd_q->reg_status);
                        cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);

                        /* On error, only save the first error value */
                        if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
                                cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);

                        cmd_q->int_rcvd = 1;

                        /* Acknowledge the interrupt and wake the kthread */
                        iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
                        wake_up_interruptible(&cmd_q->int_queue);
                }
        }
        ccp_enable_queue_interrupts(ccp);
}

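/*
 * Top half: mask queue interrupts, then either schedule the tasklet
 * or run the bottom half directly.
 */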
static irqreturn_t ccp_irq_handler(int irq, void *data)
{
        struct ccp_device *ccp = (struct ccp_device *)data;

        ccp_disable_queue_interrupts(ccp);
        if (ccp->use_tasklet)
                tasklet_schedule(&ccp->irq_tasklet);
        else
                ccp_irq_bh((unsigned long)ccp);

        return IRQ_HANDLED;
}

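/*
 * Bring up a v3 CCP: discover the available command queues, reserve
 * per-queue KSB slots and DMA pools, hook up the IRQ, start one
 * kthread per queue, and register the RNG and DMA engine services.
 */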
static int ccp_init(struct ccp_device *ccp)
{
        struct device *dev = ccp->dev;
        struct ccp_cmd_queue *cmd_q;
        struct dma_pool *dma_pool;
        char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
        unsigned int qmr, i;
        int ret;

        /* Find available queues */
        ccp->qim = 0;
        qmr = ioread32(ccp->io_regs + Q_MASK_REG);
        for (i = 0; i < MAX_HW_QUEUES; i++) {
                if (!(qmr & (1 << i)))
                        continue;

                /* Allocate a dma pool for this queue */
                snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d",
                         ccp->name, i);
                dma_pool = dma_pool_create(dma_pool_name, dev,
                                           CCP_DMAPOOL_MAX_SIZE,
                                           CCP_DMAPOOL_ALIGN, 0);
                if (!dma_pool) {
                        dev_err(dev, "unable to allocate dma pool\n");
                        ret = -ENOMEM;
                        goto e_pool;
                }

                cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
                ccp->cmd_q_count++;

                cmd_q->ccp = ccp;
                cmd_q->id = i;
                cmd_q->dma_pool = dma_pool;

                /* Reserve 2 KSB regions for the queue */
                cmd_q->sb_key = KSB_START + ccp->sb_start++;
                cmd_q->sb_ctx = KSB_START + ccp->sb_start++;
                ccp->sb_count -= 2;

                /* Preset some register values and masks that are queue
                 * number dependent
                 */
                cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
                                    (CMD_Q_STATUS_INCR * i);
                cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
                                        (CMD_Q_STATUS_INCR * i);
                cmd_q->int_ok = 1 << (i * 2);
                cmd_q->int_err = 1 << ((i * 2) + 1);

                cmd_q->free_slots = ccp_get_free_slots(cmd_q);

                init_waitqueue_head(&cmd_q->int_queue);

                /* Build queue interrupt mask (two interrupts per queue) */
                ccp->qim |= cmd_q->int_ok | cmd_q->int_err;

#ifdef CONFIG_ARM64
                /* For arm64 set the recommended queue cache settings */
                iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE +
                          (CMD_Q_CACHE_INC * i));
#endif

                dev_dbg(dev, "queue #%u available\n", i);
        }
        if (ccp->cmd_q_count == 0) {
                dev_notice(dev, "no command queues available\n");
                ret = -EIO;
                goto e_pool;
        }
        dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);

        /* Disable and clear interrupts until ready */
        ccp_disable_queue_interrupts(ccp);
        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                ioread32(cmd_q->reg_int_status);
                ioread32(cmd_q->reg_status);
        }
        iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);

        /* Request an IRQ */
        ret = sp_request_ccp_irq(ccp->sp, ccp_irq_handler, ccp->name, ccp);
        if (ret) {
                dev_err(dev, "unable to allocate an IRQ\n");
                goto e_pool;
        }

        /* Initialize the ISR tasklet if one is being used */
        if (ccp->use_tasklet)
                tasklet_init(&ccp->irq_tasklet, ccp_irq_bh,
                             (unsigned long)ccp);

        dev_dbg(dev, "Starting threads...\n");
        /* Create a kthread for each queue */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                struct task_struct *kthread;

                cmd_q = &ccp->cmd_q[i];

                kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
                                         "%s-q%u", ccp->name, cmd_q->id);
                if (IS_ERR(kthread)) {
                        dev_err(dev, "error creating queue thread (%ld)\n",
                                PTR_ERR(kthread));
                        ret = PTR_ERR(kthread);
                        goto e_kthread;
                }

                cmd_q->kthread = kthread;
                wake_up_process(kthread);
        }

        dev_dbg(dev, "Enabling interrupts...\n");
        /* Enable interrupts */
        ccp_enable_queue_interrupts(ccp);

        dev_dbg(dev, "Registering device...\n");
        ccp_add_device(ccp);

        ret = ccp_register_rng(ccp);
        if (ret)
                goto e_kthread;

        /* Register the DMA engine support */
        ret = ccp_dmaengine_register(ccp);
        if (ret)
                goto e_hwrng;

        return 0;

e_hwrng:
        ccp_unregister_rng(ccp);

e_kthread:
        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].kthread)
                        kthread_stop(ccp->cmd_q[i].kthread);

        sp_free_ccp_irq(ccp->sp, ccp);

e_pool:
        for (i = 0; i < ccp->cmd_q_count; i++)
                dma_pool_destroy(ccp->cmd_q[i].dma_pool);

        return ret;
}

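/*
 * Tear down a v3 CCP in roughly the reverse order of ccp_init(), then
 * fail any commands still on the cmd and backlog lists with -ENODEV.
 */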
static void ccp_destroy(struct ccp_device *ccp)
{
        struct ccp_cmd_queue *cmd_q;
        struct ccp_cmd *cmd;
        unsigned int i;

        /* Unregister the DMA engine */
        ccp_dmaengine_unregister(ccp);

        /* Unregister the RNG */
        ccp_unregister_rng(ccp);

        /* Remove this device from the list of available units */
        ccp_del_device(ccp);

        /* Disable and clear interrupts */
        ccp_disable_queue_interrupts(ccp);
        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                ioread32(cmd_q->reg_int_status);
                ioread32(cmd_q->reg_status);
        }
        iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);

        /* Stop the queue kthreads */
        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].kthread)
                        kthread_stop(ccp->cmd_q[i].kthread);

        sp_free_ccp_irq(ccp->sp, ccp);

        for (i = 0; i < ccp->cmd_q_count; i++)
                dma_pool_destroy(ccp->cmd_q[i].dma_pool);

        /* Flush the cmd and backlog queues */
        while (!list_empty(&ccp->cmd)) {
                /* Invoke the callback directly with an error code */
                cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
                list_del(&cmd->entry);
                cmd->callback(cmd->data, -ENODEV);
        }
        while (!list_empty(&ccp->backlog)) {
                /* Invoke the callback directly with an error code */
                cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
                list_del(&cmd->entry);
                cmd->callback(cmd->data, -ENODEV);
        }
}

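/* Version 3 devices have no 3DES engine, so .des3 stays NULL */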
static const struct ccp_actions ccp3_actions = {
        .aes = ccp_perform_aes,
        .xts_aes = ccp_perform_xts_aes,
        .des3 = NULL,
        .sha = ccp_perform_sha,
        .rsa = ccp_perform_rsa,
        .passthru = ccp_perform_passthru,
        .ecc = ccp_perform_ecc,
        .sballoc = ccp_alloc_ksb,
        .sbfree = ccp_free_ksb,
        .init = ccp_init,
        .destroy = ccp_destroy,
        .get_free_slots = ccp_get_free_slots,
        .irqhandler = ccp_irq_handler,
};

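/*
 * Two v3 variants: ccpv3_platform describes memory-mapped platform
 * devices whose registers start at offset 0; ccpv3 describes devices
 * whose register block sits 0x20000 into the mapping and which
 * advertise an RSA capability of CCP_RSA_MAX_WIDTH.
 */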
const struct ccp_vdata ccpv3_platform = {
        .version = CCP_VERSION(3, 0),
        .setup = NULL,
        .perform = &ccp3_actions,
        .offset = 0,
};

const struct ccp_vdata ccpv3 = {
        .version = CCP_VERSION(3, 0),
        .setup = NULL,
        .perform = &ccp3_actions,
        .offset = 0x20000,
        .rsamax = CCP_RSA_MAX_WIDTH,
};
598 };