// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/cpu.h>
#ifdef CONFIG_X86
#include <asm/cpu_device_id.h>
#endif
#include <linux/ccp.h>

#include "ccp-dev.h"

struct ccp_tasklet_data {
	struct completion completion;
	struct ccp_cmd *cmd;
};

/* Human-readable error strings */
static char *ccp_error_codes[] = {
	"",
	"ERR 01: ILLEGAL_ENGINE",
	"ERR 02: ILLEGAL_KEY_ID",
	"ERR 03: ILLEGAL_FUNCTION_TYPE",
	"ERR 04: ILLEGAL_FUNCTION_MODE",
	"ERR 05: ILLEGAL_FUNCTION_ENCRYPT",
	"ERR 06: ILLEGAL_FUNCTION_SIZE",
	"ERR 07: Zlib_MISSING_INIT_EOM",
	"ERR 08: ILLEGAL_FUNCTION_RSVD",
	"ERR 09: ILLEGAL_BUFFER_LENGTH",
	"ERR 10: VLSB_FAULT",
	"ERR 11: ILLEGAL_MEM_ADDR",
	"ERR 12: ILLEGAL_MEM_SEL",
	"ERR 13: ILLEGAL_CONTEXT_ID",
	"ERR 14: ILLEGAL_KEY_ADDR",
	"ERR 15: 0xF Reserved",
	"ERR 16: Zlib_ILLEGAL_MULTI_QUEUE",
	"ERR 17: Zlib_ILLEGAL_JOBID_CHANGE",
	"ERR 18: CMD_TIMEOUT",
	"ERR 19: IDMA0_AXI_SLVERR",
	"ERR 20: IDMA0_AXI_DECERR",
	"ERR 21: 0x15 Reserved",
	"ERR 22: IDMA1_AXI_SLAVE_FAULT",
	"ERR 23: IDMA1_AXI_DECERR",
	"ERR 24: 0x18 Reserved",
	"ERR 25: ZLIBVHB_AXI_SLVERR",
	"ERR 26: ZLIBVHB_AXI_DECERR",
	"ERR 27: 0x1B Reserved",
	"ERR 28: ZLIB_UNEXPECTED_EOM",
	"ERR 29: ZLIB_EXTRA_DATA",
	"ERR 30: ZLIB_BTYPE",
	"ERR 31: ZLIB_UNDEFINED_SYMBOL",
	"ERR 32: ZLIB_UNDEFINED_DISTANCE_S",
	"ERR 33: ZLIB_CODE_LENGTH_SYMBOL",
	"ERR 34: ZLIB_VHB_ILLEGAL_FETCH",
	"ERR 35: ZLIB_UNCOMPRESSED_LEN",
	"ERR 36: ZLIB_LIMIT_REACHED",
	"ERR 37: ZLIB_CHECKSUM_MISMATCH0",
	"ERR 38: ODMA0_AXI_SLVERR",
	"ERR 39: ODMA0_AXI_DECERR",
	"ERR 40: 0x28 Reserved",
	"ERR 41: ODMA1_AXI_SLVERR",
	"ERR 42: ODMA1_AXI_DECERR",
	"ERR 43: LSB_PARITY_ERR",
};

void ccp_log_error(struct ccp_device *d, int e)
{
	/* Guard against indexing past the table for unknown error codes */
	if (e < 0 || e >= (int)ARRAY_SIZE(ccp_error_codes))
		dev_err(d->dev, "CCP error: unknown error (0x%x)\n", e);
	else
		dev_err(d->dev, "CCP error: %s (0x%x)\n", ccp_error_codes[e], e);
}

/* List of CCPs, CCP count, read-write access lock, and access functions
 *
 * Lock structure: get ccp_unit_lock for reading whenever we need to
 * examine the CCP list. While holding it for reading we can acquire
 * the RR lock to update the round-robin next-CCP pointer. The unit lock
 * must be acquired before the RR lock.
 *
 * If the unit lock is acquired for writing, we have total control over
 * the list, so there's no value in getting the RR lock.
 */
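
/*
 * Hedged illustration of the lock ordering described above, as followed
 * by ccp_get_device() below; a sketch of the existing rule, not
 * additional driver logic:
 *
 *	read_lock_irqsave(&ccp_unit_lock, flags);	// unit lock first
 *	spin_lock(&ccp_rr_lock);			// then the RR lock
 *	...advance the ccp_rr round-robin pointer...
 *	spin_unlock(&ccp_rr_lock);
 *	read_unlock_irqrestore(&ccp_unit_lock, flags);
 */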
static DEFINE_RWLOCK(ccp_unit_lock);
static LIST_HEAD(ccp_units);

/* Round-robin counter */
static DEFINE_SPINLOCK(ccp_rr_lock);
static struct ccp_device *ccp_rr;

/**
 * ccp_add_device - add a CCP device to the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Put this CCP on the unit list, which makes it available
 * for use.
 */
void ccp_add_device(struct ccp_device *ccp)
{
	unsigned long flags;

	write_lock_irqsave(&ccp_unit_lock, flags);
	list_add_tail(&ccp->entry, &ccp_units);
	if (!ccp_rr)
		/* We already have the list lock (we're first) so this
		 * pointer can't change on us. Set its initial value.
		 */
		ccp_rr = ccp;
	write_unlock_irqrestore(&ccp_unit_lock, flags);
}

/**
 * ccp_del_device - remove a CCP device from the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Remove this unit from the list of devices. If the next device
 * up for use is this one, adjust the pointer. If this is the last
 * device, NULL the pointer.
 */
void ccp_del_device(struct ccp_device *ccp)
{
	unsigned long flags;

	write_lock_irqsave(&ccp_unit_lock, flags);
	if (ccp_rr == ccp) {
		/* ccp_unit_lock is read/write; any read access
		 * will be suspended while we make changes to the
		 * list and RR pointer.
		 */
		if (list_is_last(&ccp_rr->entry, &ccp_units))
			ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
						  entry);
		else
			ccp_rr = list_next_entry(ccp_rr, entry);
	}
	list_del(&ccp->entry);
	if (list_empty(&ccp_units))
		ccp_rr = NULL;
	write_unlock_irqrestore(&ccp_unit_lock, flags);
}
int ccp_register_rng(struct ccp_device *ccp)
{
	int ret = 0;

	dev_dbg(ccp->dev, "Registering RNG...\n");
	/* Register an RNG */
	ccp->hwrng.name = ccp->rngname;
	ccp->hwrng.read = ccp_trng_read;
	ret = hwrng_register(&ccp->hwrng);
	if (ret)
		dev_err(ccp->dev, "error registering hwrng (%d)\n", ret);

	return ret;
}

void ccp_unregister_rng(struct ccp_device *ccp)
{
	if (ccp->hwrng.name)
		hwrng_unregister(&ccp->hwrng);
}

static struct ccp_device *ccp_get_device(void)
{
	unsigned long flags;
	struct ccp_device *dp = NULL;

	/* We round-robin through the unit list.
	 * The (ccp_rr) pointer refers to the next unit to use.
	 */
	read_lock_irqsave(&ccp_unit_lock, flags);
	if (!list_empty(&ccp_units)) {
		spin_lock(&ccp_rr_lock);
		dp = ccp_rr;
		if (list_is_last(&ccp_rr->entry, &ccp_units))
			ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
						  entry);
		else
			ccp_rr = list_next_entry(ccp_rr, entry);
		spin_unlock(&ccp_rr_lock);
	}
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return dp;
}

/**
 * ccp_present - check if a CCP device is present
 *
 * Returns zero if a CCP device is present, -ENODEV otherwise.
 */
int ccp_present(void)
{
	unsigned long flags;
	int ret;

	read_lock_irqsave(&ccp_unit_lock, flags);
	ret = list_empty(&ccp_units);
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return ret ? -ENODEV : 0;
}
EXPORT_SYMBOL_GPL(ccp_present);

/**
 * ccp_version - get the version of the CCP device
 *
 * Returns the version from the first unit on the list,
 * or zero if no CCP device is present.
 */
unsigned int ccp_version(void)
{
	struct ccp_device *dp;
	unsigned long flags;
	int ret = 0;

	read_lock_irqsave(&ccp_unit_lock, flags);
	if (!list_empty(&ccp_units)) {
		dp = list_first_entry(&ccp_units, struct ccp_device, entry);
		ret = dp->vdata->version;
	}
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ccp_version);
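
/*
 * Hedged usage sketch (illustrative only, not part of this file): an
 * external consumer can gate its setup on the two exported probes above.
 * The version threshold and do_v5_setup() are hypothetical names:
 *
 *	if (ccp_present() != 0)
 *		return -ENODEV;		// no CCP unit registered yet
 *	if (ccp_version() >= 0x05)	// illustrative threshold
 *		do_v5_setup();		// hypothetical version-gated path
 */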

/**
 * ccp_enqueue_cmd - queue an operation for processing by the CCP
 *
 * @cmd: ccp_cmd struct to be processed
 *
 * Queue a cmd to be processed by the CCP. If queueing the cmd
 * would exceed the defined length of the cmd queue, the cmd is
 * queued only if the CCP_CMD_MAY_BACKLOG flag is set, in which
 * case -EBUSY is returned; without the flag, -ENOSPC is returned.
 *
 * The callback routine specified in the ccp_cmd struct will be
 * called to notify the caller of completion (if the cmd was not
 * backlogged) or advancement out of the backlog. If the cmd has
 * advanced out of the backlog the "err" value of the callback
 * will be -EINPROGRESS. Any other "err" value during callback is
 * the result of the operation.
 *
 * The cmd has been successfully queued if:
 *   the return code is -EINPROGRESS or
 *   the return code is -EBUSY and CCP_CMD_MAY_BACKLOG flag is set
 */
int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
	struct ccp_device *ccp;
	unsigned long flags;
	unsigned int i;
	int ret;

	/* Some commands might need to be sent to a specific device */
	ccp = cmd->ccp ? cmd->ccp : ccp_get_device();

	if (!ccp)
		return -ENODEV;

	/* Caller must supply a callback routine */
	if (!cmd->callback)
		return -EINVAL;

	cmd->ccp = ccp;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	i = ccp->cmd_q_count;

	if (ccp->cmd_count >= MAX_CMD_QLEN) {
		if (cmd->flags & CCP_CMD_MAY_BACKLOG) {
			ret = -EBUSY;
			list_add_tail(&cmd->entry, &ccp->backlog);
		} else {
			ret = -ENOSPC;
		}
	} else {
		ret = -EINPROGRESS;
		ccp->cmd_count++;
		list_add_tail(&cmd->entry, &ccp->cmd);

		/* Find an idle queue */
		if (!ccp->suspending) {
			for (i = 0; i < ccp->cmd_q_count; i++) {
				if (ccp->cmd_q[i].active)
					continue;

				break;
			}
		}
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* If we found an idle queue, wake it up */
	if (i < ccp->cmd_q_count)
		wake_up_process(ccp->cmd_q[i].kthread);

	return ret;
}
EXPORT_SYMBOL_GPL(ccp_enqueue_cmd);
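
/*
 * Hedged caller sketch for ccp_enqueue_cmd() (illustrative only; the
 * callback and completion names are hypothetical). -EINPROGRESS and
 * -EBUSY both mean "queued"; anything else is a hard failure:
 *
 *	static void my_cmd_done(void *data, int err)
 *	{
 *		if (err == -EINPROGRESS)
 *			return;		// left the backlog, still running
 *		complete(data);		// finished; err is the final result
 *	}
 *
 *	cmd->flags = CCP_CMD_MAY_BACKLOG;
 *	cmd->callback = my_cmd_done;
 *	cmd->data = &my_completion;
 *	ret = ccp_enqueue_cmd(cmd);
 *	if (ret != -EINPROGRESS && ret != -EBUSY)
 *		return ret;		// -ENODEV, -EINVAL or -ENOSPC
 */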

static void ccp_do_cmd_backlog(struct work_struct *work)
{
	struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
	struct ccp_device *ccp = cmd->ccp;
	unsigned long flags;
	unsigned int i;

	cmd->callback(cmd->data, -EINPROGRESS);

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->cmd_count++;
	list_add_tail(&cmd->entry, &ccp->cmd);

	/* Find an idle queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		if (ccp->cmd_q[i].active)
			continue;

		break;
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* If we found an idle queue, wake it up */
	if (i < ccp->cmd_q_count)
		wake_up_process(ccp->cmd_q[i].kthread);
}

static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
{
	struct ccp_device *ccp = cmd_q->ccp;
	struct ccp_cmd *cmd = NULL;
	struct ccp_cmd *backlog = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	cmd_q->active = 0;

	if (ccp->suspending) {
		cmd_q->suspended = 1;

		spin_unlock_irqrestore(&ccp->cmd_lock, flags);
		wake_up_interruptible(&ccp->suspend_queue);

		return NULL;
	}

	if (ccp->cmd_count) {
		cmd_q->active = 1;

		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
		list_del(&cmd->entry);

		ccp->cmd_count--;
	}

	if (!list_empty(&ccp->backlog)) {
		backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,
					   entry);
		list_del(&backlog->entry);
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	if (backlog) {
		INIT_WORK(&backlog->work, ccp_do_cmd_backlog);
		schedule_work(&backlog->work);
	}

	return cmd;
}

static void ccp_do_cmd_complete(unsigned long data)
{
	struct ccp_tasklet_data *tdata = (struct ccp_tasklet_data *)data;
	struct ccp_cmd *cmd = tdata->cmd;

	cmd->callback(cmd->data, cmd->ret);

	complete(&tdata->completion);
}

/**
 * ccp_cmd_queue_thread - kernel thread that services one CCP cmd queue
 *
 * @data: thread-specific data (the ccp_cmd_queue to service)
 */
int ccp_cmd_queue_thread(void *data)
{
	struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
	struct ccp_cmd *cmd;
	struct ccp_tasklet_data tdata;
	struct tasklet_struct tasklet;

	tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();

		set_current_state(TASK_INTERRUPTIBLE);

		cmd = ccp_dequeue_cmd(cmd_q);
		if (!cmd)
			continue;

		__set_current_state(TASK_RUNNING);

		/* Execute the command */
		cmd->ret = ccp_run_cmd(cmd_q, cmd);

		/* Schedule the completion callback */
		tdata.cmd = cmd;
		init_completion(&tdata.completion);
		tasklet_schedule(&tasklet);
		wait_for_completion(&tdata.completion);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}
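
/*
 * Hedged sketch of the per-command flow driven by the thread above
 * (a description of the existing code, not additional logic):
 *
 *	kthread (process context)        tasklet (softirq context)
 *	-------------------------        -------------------------
 *	cmd = ccp_dequeue_cmd()
 *	cmd->ret = ccp_run_cmd()
 *	tasklet_schedule()  --------->   cmd->callback(cmd->data, cmd->ret)
 *	wait_for_completion() <------    complete(&tdata->completion)
 *
 * Running the callback from the tasklet keeps it in atomic context,
 * matching what callers of asynchronous crypto completions expect.
 */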

/**
 * ccp_alloc_struct - allocate and initialize the ccp_device struct
 *
 * @sp: sp_device struct which contains the CCP device
 */
struct ccp_device *ccp_alloc_struct(struct sp_device *sp)
{
	struct device *dev = sp->dev;
	struct ccp_device *ccp;

	ccp = devm_kzalloc(dev, sizeof(*ccp), GFP_KERNEL);
	if (!ccp)
		return NULL;
	ccp->dev = dev;
	ccp->sp = sp;
	ccp->axcache = sp->axcache;

	INIT_LIST_HEAD(&ccp->cmd);
	INIT_LIST_HEAD(&ccp->backlog);

	spin_lock_init(&ccp->cmd_lock);
	mutex_init(&ccp->req_mutex);
	mutex_init(&ccp->sb_mutex);
	ccp->sb_count = KSB_COUNT;
	ccp->sb_start = 0;

	/* Initialize the wait queues */
	init_waitqueue_head(&ccp->sb_queue);
	init_waitqueue_head(&ccp->suspend_queue);

	snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", sp->ord);
	snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", sp->ord);

	return ccp;
}

int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
	u32 trng_value;
	int len = min_t(int, sizeof(trng_value), max);

	/* Locking is provided by the caller so we can update device
	 * hwrng-related fields safely
	 */
	trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
	if (!trng_value) {
		/* Zero is returned if no data is available or if a
		 * bad-entropy error is present. Assume an error if
		 * we exceed TRNG_RETRIES reads of zero.
		 */
		if (ccp->hwrng_retries++ > TRNG_RETRIES)
			return -EIO;

		return 0;
	}

	/* Reset the counter and save the rng value */
	ccp->hwrng_retries = 0;
	memcpy(data, &trng_value, len);

	return len;
}
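
/*
 * Hedged note on the read callback above: the hwrng core treats a zero
 * return as "no entropy available yet" and retries, while a negative
 * value is a hard error. A minimal sketch of a direct call (the core
 * normally does this for us; consume_entropy() is hypothetical):
 *
 *	u32 val;
 *	int n = ccp_trng_read(&ccp->hwrng, &val, sizeof(val), false);
 *	if (n > 0)
 *		consume_entropy(&val, n);
 *	else if (n < 0)
 *		pr_err("TRNG read failed: %d\n", n);
 */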

#ifdef CONFIG_PM
bool ccp_queues_suspended(struct ccp_device *ccp)
{
	unsigned int suspended = 0;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].suspended)
			suspended++;

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	return ccp->cmd_q_count == suspended;
}

int ccp_dev_suspend(struct sp_device *sp, pm_message_t state)
{
	struct ccp_device *ccp = sp->ccp_data;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->suspending = 1;

	/* Wake all the queue kthreads to prepare for suspend */
	for (i = 0; i < ccp->cmd_q_count; i++)
		wake_up_process(ccp->cmd_q[i].kthread);

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* Wait for all queue kthreads to say they're done */
	while (!ccp_queues_suspended(ccp))
		wait_event_interruptible(ccp->suspend_queue,
					 ccp_queues_suspended(ccp));

	return 0;
}

int ccp_dev_resume(struct sp_device *sp)
{
	struct ccp_device *ccp = sp->ccp_data;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->suspending = 0;

	/* Wake up all the kthreads */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		ccp->cmd_q[i].suspended = 0;
		wake_up_process(ccp->cmd_q[i].kthread);
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	return 0;
}
#endif
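
/*
 * Hedged sketch of the suspend handshake implemented above (a
 * description of the existing flow, not additional logic):
 *
 *	ccp_dev_suspend()                  queue kthreads
 *	-----------------                  --------------
 *	suspending = 1
 *	wake_up_process()  ----------->    ccp_dequeue_cmd() sees
 *	                                   suspending, sets
 *	wait_event_interruptible() <---    cmd_q->suspended = 1 and calls
 *	(until ccp_queues_suspended()      wake_up_interruptible()
 *	 reports every queue parked)
 */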

int ccp_dev_init(struct sp_device *sp)
{
	struct device *dev = sp->dev;
	struct ccp_device *ccp;
	int ret;

	ret = -ENOMEM;
	ccp = ccp_alloc_struct(sp);
	if (!ccp)
		goto e_err;
	sp->ccp_data = ccp;

	ccp->vdata = (struct ccp_vdata *)sp->dev_vdata->ccp_vdata;
	if (!ccp->vdata || !ccp->vdata->version) {
		ret = -ENODEV;
		dev_err(dev, "missing driver data\n");
		goto e_err;
	}

	ccp->use_tasklet = sp->use_tasklet;

	ccp->io_regs = sp->io_map + ccp->vdata->offset;
	if (ccp->vdata->setup)
		ccp->vdata->setup(ccp);

	ret = ccp->vdata->perform->init(ccp);
	if (ret)
		goto e_err;

	dev_notice(dev, "ccp enabled\n");

	return 0;

e_err:
	sp->ccp_data = NULL;

	dev_notice(dev, "ccp initialization failed\n");

	return ret;
}

void ccp_dev_destroy(struct sp_device *sp)
{
	struct ccp_device *ccp = sp->ccp_data;

	if (!ccp)
		return;

	ccp->vdata->perform->destroy(ccp);
}