/*
 * hw_random/core.c: HWRNG core API
 *
 * Copyright 2006 Michael Buesch <m@bues.ch>
 * Copyright 2005 (c) MontaVista Software, Inc.
 *
 * Please read Documentation/admin-guide/hw_random.rst for details on use.
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hw_random.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#define RNG_MODULE_NAME         "hw_random"

static struct hwrng *current_rng;
/* the current rng has been explicitly chosen by user via sysfs */
static int cur_rng_set_by_user;
static struct task_struct *hwrng_fill;
/* list of registered rngs */
static LIST_HEAD(rng_list);
/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
static DEFINE_MUTEX(reading_mutex);
static int data_avail;
static u8 *rng_buffer, *rng_fillbuf;
static unsigned short current_quality;
static unsigned short default_quality; /* = 0; default to "off" */

module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
                 "current hwrng entropy estimation per 1024 bits of input -- obsolete, use rng_quality instead");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
                 "default entropy content of hwrng per 1024 bits of input");

static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
static void hwrng_manage_rngd(struct hwrng *rng);

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
                               int wait);

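/*
 * rng_buffer and rng_fillbuf are allocated with this size: at least 32
 * bytes, rounded up to a full cache line when SMP_CACHE_BYTES is larger.
 */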
static size_t rng_buffer_size(void)
{
        return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
}

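/*
 * Pull up to 32 bytes from the rng without blocking and mix whatever
 * arrives into the input pool via add_device_randomness(), which credits
 * no entropy.  Used when a device becomes the (new) current rng.
 */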
static void add_early_randomness(struct hwrng *rng)
{
        int bytes_read;

        mutex_lock(&reading_mutex);
        bytes_read = rng_get_data(rng, rng_fillbuf, 32, 0);
        mutex_unlock(&reading_mutex);
        if (bytes_read > 0)
                add_device_randomness(rng_fillbuf, bytes_read);
}

static inline void cleanup_rng(struct kref *kref)
{
        struct hwrng *rng = container_of(kref, struct hwrng, ref);

        if (rng->cleanup)
                rng->cleanup(rng);

        complete(&rng->cleanup_done);
}

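/*
 * Make @rng the current rng.  The candidate is initialized first so that
 * current_rng is never left pointing at a device whose init failed; only
 * then is the reference to the previous current rng dropped.
 * Caller must hold rng_mutex.
 */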
static int set_current_rng(struct hwrng *rng)
{
        int err;

        BUG_ON(!mutex_is_locked(&rng_mutex));

        err = hwrng_init(rng);
        if (err)
                return err;

        drop_current_rng();
        current_rng = rng;

        return 0;
}

static void drop_current_rng(void)
{
        BUG_ON(!mutex_is_locked(&rng_mutex));
        if (!current_rng)
                return;

        /* decrease last reference for triggering the cleanup */
        kref_put(&current_rng->ref, cleanup_rng);
        current_rng = NULL;
}

/* Returns ERR_PTR(), NULL or refcounted hwrng */
static struct hwrng *get_current_rng_nolock(void)
{
        if (current_rng)
                kref_get(&current_rng->ref);

        return current_rng;
}

static struct hwrng *get_current_rng(void)
{
        struct hwrng *rng;

        if (mutex_lock_interruptible(&rng_mutex))
                return ERR_PTR(-ERESTARTSYS);

        rng = get_current_rng_nolock();

        mutex_unlock(&rng_mutex);
        return rng;
}

static void put_rng(struct hwrng *rng)
{
        /*
         * Hold rng_mutex here so we serialize in case they set_current_rng
         * on rng again immediately.
         */
        mutex_lock(&rng_mutex);
        if (rng)
                kref_put(&rng->ref, cleanup_rng);
        mutex_unlock(&rng_mutex);
}

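/*
 * Bring @rng up for use as the current rng.  If it is already live
 * (refcount still nonzero) we just take another reference and skip the
 * ->init callback; otherwise ->init is run and the refcount and cleanup
 * completion are (re)initialized.  Either way a zero quality falls back
 * to default_quality, values above 1024 are clamped, and the fill thread
 * is started or stopped to match.
 */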
static int hwrng_init(struct hwrng *rng)
{
        if (kref_get_unless_zero(&rng->ref))
                goto skip_init;

        if (rng->init) {
                int ret;

                ret = rng->init(rng);
                if (ret)
                        return ret;
        }

        kref_init(&rng->ref);
        reinit_completion(&rng->cleanup_done);

skip_init:
        if (!rng->quality)
                rng->quality = default_quality;
        if (rng->quality > 1024)
                rng->quality = 1024;
        current_quality = rng->quality; /* obsolete */

        hwrng_manage_rngd(rng);

        return 0;
}

static int rng_dev_open(struct inode *inode, struct file *filp)
{
        /* enforce read-only access to this chrdev */
        if ((filp->f_mode & FMODE_READ) == 0)
                return -EINVAL;
        if (filp->f_mode & FMODE_WRITE)
                return -EINVAL;
        return 0;
}

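/*
 * Fetch data from the backend with reading_mutex held.  Drivers that
 * implement ->read get the whole buffer and the wait flag; otherwise we
 * fall back to the legacy ->data_present/->data_read pair, which yields
 * at most one 32-bit word per call (->data_read reports how many of its
 * bytes are valid).
 */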
static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
                        int wait) {
        int present;

        BUG_ON(!mutex_is_locked(&reading_mutex));
        if (rng->read)
                return rng->read(rng, (void *)buffer, size, wait);

        if (rng->data_present)
                present = rng->data_present(rng, wait);
        else
                present = 1;

        if (present)
                return rng->data_read(rng, (u32 *)buffer);

        return 0;
}

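/*
 * read() handler for /dev/hwrng.  Refills rng_buffer under reading_mutex
 * whenever data_avail runs out and copies data to userspace from the
 * tail of the buffer.  Honours O_NONBLOCK, returns a short read when a
 * signal is pending, and reschedules between chunks so large reads do
 * not hog the CPU.
 */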
static ssize_t rng_dev_read(struct file *filp, char __user *buf,
                            size_t size, loff_t *offp)
{
        ssize_t ret = 0;
        int err = 0;
        int bytes_read, len;
        struct hwrng *rng;

        while (size) {
                rng = get_current_rng();
                if (IS_ERR(rng)) {
                        err = PTR_ERR(rng);
                        goto out;
                }
                if (!rng) {
                        err = -ENODEV;
                        goto out;
                }

                if (mutex_lock_interruptible(&reading_mutex)) {
                        err = -ERESTARTSYS;
                        goto out_put;
                }
                if (!data_avail) {
                        bytes_read = rng_get_data(rng, rng_buffer,
                                rng_buffer_size(),
                                !(filp->f_flags & O_NONBLOCK));
                        if (bytes_read < 0) {
                                err = bytes_read;
                                goto out_unlock_reading;
                        }
                        data_avail = bytes_read;
                }

                if (!data_avail) {
                        if (filp->f_flags & O_NONBLOCK) {
                                err = -EAGAIN;
                                goto out_unlock_reading;
                        }
                } else {
                        len = data_avail;
                        if (len > size)
                                len = size;

                        data_avail -= len;

                        if (copy_to_user(buf + ret, rng_buffer + data_avail,
                                                                len)) {
                                err = -EFAULT;
                                goto out_unlock_reading;
                        }

                        size -= len;
                        ret += len;
                }

                mutex_unlock(&reading_mutex);
                put_rng(rng);

                if (need_resched())
                        schedule_timeout_interruptible(1);

                if (signal_pending(current)) {
                        err = -ERESTARTSYS;
                        goto out;
                }
        }
out:
        return ret ? : err;

out_unlock_reading:
        mutex_unlock(&reading_mutex);
out_put:
        put_rng(rng);
        goto out;
}

static const struct file_operations rng_chrdev_ops = {
        .owner          = THIS_MODULE,
        .open           = rng_dev_open,
        .read           = rng_dev_read,
        .llseek         = noop_llseek,
};

static const struct attribute_group *rng_dev_groups[];

static struct miscdevice rng_miscdev = {
        .minor          = HWRNG_MINOR,
        .name           = RNG_MODULE_NAME,
        .nodename       = "hwrng",
        .fops           = &rng_chrdev_ops,
        .groups         = rng_dev_groups,
};

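/*
 * Pick the registered rng with the highest quality and make it current.
 * An empty rng_list simply drops the current rng.  Because this is an
 * automatic choice, cur_rng_set_by_user is cleared on success.
 * Caller must hold rng_mutex.
 */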
static int enable_best_rng(void)
{
        struct hwrng *rng, *new_rng = NULL;
        int ret = -ENODEV;

        BUG_ON(!mutex_is_locked(&rng_mutex));

        /* no rng to use? */
        if (list_empty(&rng_list)) {
                drop_current_rng();
                cur_rng_set_by_user = 0;
                return 0;
        }

        /* use the rng which offers the best quality */
        list_for_each_entry(rng, &rng_list, list) {
                if (!new_rng || rng->quality > new_rng->quality)
                        new_rng = rng;
        }

        ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
        if (!ret)
                cur_rng_set_by_user = 0;

        return ret;
}

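/*
 * sysfs: echoing a device name into rng_current pins that device as the
 * current rng (cur_rng_set_by_user = 1); echoing an empty string reverts
 * to automatic best-quality selection.  If the selection changed, the
 * new device seeds some early randomness outside of rng_mutex.
 */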
static ssize_t rng_current_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t len)
{
        int err;
        struct hwrng *rng, *old_rng, *new_rng;

        err = mutex_lock_interruptible(&rng_mutex);
        if (err)
                return -ERESTARTSYS;

        old_rng = current_rng;
        if (sysfs_streq(buf, "")) {
                err = enable_best_rng();
        } else {
                list_for_each_entry(rng, &rng_list, list) {
                        if (sysfs_streq(rng->name, buf)) {
                                err = set_current_rng(rng);
                                if (!err)
                                        cur_rng_set_by_user = 1;
                                break;
                        }
                }
        }
        new_rng = get_current_rng_nolock();
        mutex_unlock(&rng_mutex);

        if (new_rng) {
                if (new_rng != old_rng)
                        add_early_randomness(new_rng);
                put_rng(new_rng);
        }

        return err ? : len;
}

static ssize_t rng_current_show(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        ssize_t ret;
        struct hwrng *rng;

        rng = get_current_rng();
        if (IS_ERR(rng))
                return PTR_ERR(rng);

        ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
        put_rng(rng);

        return ret;
}

static ssize_t rng_available_show(struct device *dev,
                                  struct device_attribute *attr,
                                  char *buf)
{
        int err;
        struct hwrng *rng;

        err = mutex_lock_interruptible(&rng_mutex);
        if (err)
                return -ERESTARTSYS;
        buf[0] = '\0';
        list_for_each_entry(rng, &rng_list, list) {
                strlcat(buf, rng->name, PAGE_SIZE);
                strlcat(buf, " ", PAGE_SIZE);
        }
        strlcat(buf, "\n", PAGE_SIZE);
        mutex_unlock(&rng_mutex);

        return strlen(buf);
}

static ssize_t rng_selected_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
{
        return sysfs_emit(buf, "%d\n", cur_rng_set_by_user);
}

static ssize_t rng_quality_show(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        ssize_t ret;
        struct hwrng *rng;

        rng = get_current_rng();
        if (IS_ERR(rng))
                return PTR_ERR(rng);

        if (!rng) /* no need to put_rng */
                return -ENODEV;

        ret = sysfs_emit(buf, "%hu\n", rng->quality);
        put_rng(rng);

        return ret;
}

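/*
 * sysfs: set the entropy estimate of the current rng, in entropy per
 * 1024 bits of input (0..1024).  Changing the quality may change which
 * rng is "best", so the selection is re-evaluated and the fill thread is
 * started or stopped to match the new value.
 */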
static ssize_t rng_quality_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t len)
{
        u16 quality;
        int ret = -EINVAL;

        if (len < 2)
                return -EINVAL;

        ret = mutex_lock_interruptible(&rng_mutex);
        if (ret)
                return -ERESTARTSYS;

        ret = kstrtou16(buf, 0, &quality);
        if (ret || quality > 1024) {
                ret = -EINVAL;
                goto out;
        }

        if (!current_rng) {
                ret = -ENODEV;
                goto out;
        }

        current_rng->quality = quality;
        current_quality = quality; /* obsolete */

        /* the best available RNG may have changed */
        ret = enable_best_rng();

        /* start/stop rngd if necessary */
        if (current_rng)
                hwrng_manage_rngd(current_rng);

out:
        mutex_unlock(&rng_mutex);
        return ret ? ret : len;
}

static DEVICE_ATTR_RW(rng_current);
static DEVICE_ATTR_RO(rng_available);
static DEVICE_ATTR_RO(rng_selected);
static DEVICE_ATTR_RW(rng_quality);

static struct attribute *rng_dev_attrs[] = {
        &dev_attr_rng_current.attr,
        &dev_attr_rng_available.attr,
        &dev_attr_rng_selected.attr,
        &dev_attr_rng_quality.attr,
        NULL
};

ATTRIBUTE_GROUPS(rng_dev);

static void __exit unregister_miscdev(void)
{
        misc_deregister(&rng_miscdev);
}

static int __init register_miscdev(void)
{
        return misc_register(&rng_miscdev);
}

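/*
 * Kernel thread that feeds hardware randomness into the input pool.
 * Each pass reads one buffer and credits rc * quality * 8 / 1024 bits of
 * entropy, e.g. a 64-byte read at quality 700 credits
 * (64 * 700 * 8) >> 10 = 350 bits.  A running total below one full bit
 * is carried over to the next iteration.  The thread exits when no rng
 * is available or when the current rng's quality is 0.
 */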
static int hwrng_fillfn(void *unused)
{
        size_t entropy, entropy_credit = 0; /* in 1/1024 of a bit */
        long rc;

        while (!kthread_should_stop()) {
                unsigned short quality;
                struct hwrng *rng;

                rng = get_current_rng();
                if (IS_ERR(rng) || !rng)
                        break;
                mutex_lock(&reading_mutex);
                rc = rng_get_data(rng, rng_fillbuf,
                                  rng_buffer_size(), 1);
                if (current_quality != rng->quality)
                        rng->quality = current_quality; /* obsolete */
                quality = rng->quality;
                mutex_unlock(&reading_mutex);
                put_rng(rng);

                if (!quality)
                        break;

                if (rc <= 0) {
                        pr_warn("hwrng: no data available\n");
                        msleep_interruptible(10000);
                        continue;
                }

                /* If we cannot credit at least one bit of entropy,
                 * keep track of the remainder for the next iteration
                 */
                entropy = rc * quality * 8 + entropy_credit;
                if ((entropy >> 10) == 0)
                        entropy_credit = entropy;

                /* Outside lock, sure, but y'know: randomness. */
                add_hwgenerator_randomness((void *)rng_fillbuf, rc,
                                           entropy >> 10);
        }
        hwrng_fill = NULL;
        return 0;
}

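/*
 * Start or stop the fill thread to match the quality of @rng: a quality
 * of 0 means the device credits no entropy, so the thread is stopped;
 * any non-zero quality starts it if it is not already running.
 * Caller must hold rng_mutex.
 */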
static void hwrng_manage_rngd(struct hwrng *rng)
{
        if (WARN_ON(!mutex_is_locked(&rng_mutex)))
                return;

        if (rng->quality == 0 && hwrng_fill)
                kthread_stop(hwrng_fill);
        if (rng->quality > 0 && !hwrng_fill) {
                hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
                if (IS_ERR(hwrng_fill)) {
                        pr_err("hwrng_fill thread creation failed\n");
                        hwrng_fill = NULL;
                }
        }
}

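/*
 * Register a new hardware rng.  The device must have a name and at least
 * one of ->read or ->data_read; duplicate names are rejected.  It becomes
 * the current rng if there is none yet, or if it offers better quality
 * and the user has not pinned a choice via sysfs.  Newly-current devices,
 * and devices without an ->init callback, contribute early randomness.
 */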
int hwrng_register(struct hwrng *rng)
{
        int err = -EINVAL;
        struct hwrng *tmp;
        bool is_new_current = false;

        if (!rng->name || (!rng->data_read && !rng->read))
                goto out;

        mutex_lock(&rng_mutex);

        /* Must not register two RNGs with the same name. */
        err = -EEXIST;
        list_for_each_entry(tmp, &rng_list, list) {
                if (strcmp(tmp->name, rng->name) == 0)
                        goto out_unlock;
        }
        list_add_tail(&rng->list, &rng_list);

        init_completion(&rng->cleanup_done);
        complete(&rng->cleanup_done);

        if (!current_rng ||
            (!cur_rng_set_by_user && rng->quality > current_rng->quality)) {
                /*
                 * Set new rng as current as the new rng source
                 * provides better entropy quality and was not
                 * chosen by userspace.
                 */
                err = set_current_rng(rng);
                if (err)
                        goto out_unlock;
                /* to use current_rng in add_early_randomness() we need
                 * to take a ref
                 */
                is_new_current = true;
                kref_get(&rng->ref);
        }
        mutex_unlock(&rng_mutex);
        if (is_new_current || !rng->init) {
                /*
                 * Use a new device's input to add some randomness to
                 * the system.  If this rng device isn't going to be
                 * used right away, its init function hasn't been
                 * called yet by set_current_rng(); so only use the
                 * randomness from devices that don't need an init callback
                 */
                add_early_randomness(rng);
        }
        if (is_new_current)
                put_rng(rng);
        return 0;
out_unlock:
        mutex_unlock(&rng_mutex);
out:
        return err;
}
EXPORT_SYMBOL_GPL(hwrng_register);

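/*
 * Unregister @rng.  If it was current, the best remaining rng takes over;
 * if the list is now empty the fill thread is stopped.  Waits for the
 * device's cleanup to complete so the caller may free it on return.
 */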
void hwrng_unregister(struct hwrng *rng)
{
        struct hwrng *old_rng, *new_rng;
        int err;

        mutex_lock(&rng_mutex);

        old_rng = current_rng;
        list_del(&rng->list);
        if (current_rng == rng) {
                err = enable_best_rng();
                if (err) {
                        drop_current_rng();
                        cur_rng_set_by_user = 0;
                }
        }

        new_rng = get_current_rng_nolock();
        if (list_empty(&rng_list)) {
                mutex_unlock(&rng_mutex);
                if (hwrng_fill)
                        kthread_stop(hwrng_fill);
        } else
                mutex_unlock(&rng_mutex);

        if (new_rng) {
                if (old_rng != new_rng)
                        add_early_randomness(new_rng);
                put_rng(new_rng);
        }

        wait_for_completion(&rng->cleanup_done);
}
EXPORT_SYMBOL_GPL(hwrng_unregister);

static void devm_hwrng_release(struct device *dev, void *res)
{
        hwrng_unregister(*(struct hwrng **)res);
}

static int devm_hwrng_match(struct device *dev, void *res, void *data)
{
        struct hwrng **r = res;

        if (WARN_ON(!r || !*r))
                return 0;

        return *r == data;
}

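/*
 * Device-managed (devres) variant of hwrng_register(): the rng is
 * unregistered automatically when @dev is unbound, so drivers do not
 * need an explicit hwrng_unregister() in their remove path.
 */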
int devm_hwrng_register(struct device *dev, struct hwrng *rng)
{
        struct hwrng **ptr;
        int error;

        ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return -ENOMEM;

        error = hwrng_register(rng);
        if (error) {
                devres_free(ptr);
                return error;
        }

        *ptr = rng;
        devres_add(dev, ptr);
        return 0;
}
EXPORT_SYMBOL_GPL(devm_hwrng_register);

void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
{
        devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng);
}
EXPORT_SYMBOL_GPL(devm_hwrng_unregister);

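/*
 * Module init: allocate the shared read and fill buffers (kmalloc keeps
 * them valid for virt_to_page(), see the comment below) and register the
 * /dev/hwrng misc device.  On miscdev failure both buffers are freed
 * again.
 */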
static int __init hwrng_modinit(void)
{
        int ret;

        /* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
        rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
        if (!rng_buffer)
                return -ENOMEM;

        rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
        if (!rng_fillbuf) {
                kfree(rng_buffer);
                return -ENOMEM;
        }

        ret = register_miscdev();
        if (ret) {
                kfree(rng_fillbuf);
                kfree(rng_buffer);
        }

        return ret;
}

static void __exit hwrng_modexit(void)
{
        mutex_lock(&rng_mutex);
        BUG_ON(current_rng);
        kfree(rng_buffer);
        kfree(rng_fillbuf);
        mutex_unlock(&rng_mutex);

        unregister_miscdev();
}

fs_initcall(hwrng_modinit); /* depends on misc_register() */
module_exit(hwrng_modexit);

MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");