// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/security.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVMEM_MINOR    1
#define DEVPORT_MINOR   4

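/*
 * Return how many of the next @size bytes starting at @start fit within
 * the page that @start lands in, i.e. the largest chunk the copy loops
 * below can handle without crossing a page boundary.
 */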
static inline unsigned long size_inside_page(unsigned long start,
                                             unsigned long size)
{
        unsigned long sz;

        sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

        return min(sz, size);
}

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
        return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return 1;
}
#endif

#ifdef CONFIG_STRICT_DEVMEM
static inline int page_is_allowed(unsigned long pfn)
{
        return devmem_is_allowed(pfn);
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn))
                        return 0;
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#else
static inline int page_is_allowed(unsigned long pfn)
{
        return 1;
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif

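/*
 * Give the scheduler a chance to run if a reschedule is due, and tell
 * the caller's copy loop to stop early when a signal is pending.
 */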
static inline bool should_stop_iteration(void)
{
        if (need_resched())
                cond_resched();
        return signal_pending(current);
}

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t read, sz;
        void *ptr;
        char *bounce;
        int err;

        if (p != *ppos)
                return 0;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;
        read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif

        bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!bounce)
                return -ENOMEM;

        while (count > 0) {
                unsigned long remaining;
                int allowed, probe;

                sz = size_inside_page(p, count);

                err = -EPERM;
                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        goto failed;

                err = -EFAULT;
                if (allowed == 2) {
                        /* Show zeros for restricted memory. */
                        remaining = clear_user(buf, sz);
                } else {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr)
                                goto failed;

                        probe = copy_from_kernel_nofault(bounce, ptr, sz);
                        unxlate_dev_mem_ptr(p, ptr);
                        if (probe)
                                goto failed;

                        remaining = copy_to_user(buf, bounce, sz);
                }

                if (remaining)
                        goto failed;

                buf += sz;
                p += sz;
                count -= sz;
                read += sz;
                if (should_stop_iteration())
                        break;
        }
        kfree(bounce);

        *ppos += read;
        return read;

failed:
        kfree(bounce);
        return err;
}

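/*
 * This function writes to the *physical* memory, mirroring read_mem()
 * above: the f_pos points directly to the memory location.
 */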
static ssize_t write_mem(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t written, sz;
        unsigned long copied;
        void *ptr;

        if (p != *ppos)
                return -EFBIG;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;

        written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                int allowed;

                sz = size_inside_page(p, count);

                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        return -EPERM;

                /* Skip actual writing when a page is marked as restricted. */
                if (allowed == 1) {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr) {
                                if (written)
                                        break;
                                return -EFAULT;
                        }

                        copied = copy_from_user(ptr, buf, sz);
                        unxlate_dev_mem_ptr(p, ptr);
                        if (copied) {
                                written += sz - copied;
                                if (written)
                                        break;
                                return -EFAULT;
                        }
                }

                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
                if (should_stop_iteration())
                        break;
        }

        *ppos += written;
        return written;
}

int __weak phys_mem_access_prot_allowed(struct file *file,
        unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
        return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_DSYNC because we cannot tolerate memory
         * attribute aliases.
         */
        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#else
        /*
         * Accessing memory above the top the kernel knows about, or through
         * a file that was opened with O_DSYNC, will be done non-cached.
         */
        if (file->f_flags & O_DSYNC)
                return 1;
        return addr >= __pa(high_memory);
#endif
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
        phys_addr_t offset = pfn << PAGE_SHIFT;

        if (uncached_access(file, offset))
                return pgprot_noncached(vma_prot);
#endif
        return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
                                           unsigned long addr,
                                           unsigned long len,
                                           unsigned long pgoff,
                                           unsigned long flags)
{
        if (!valid_mmap_phys_addr_range(pgoff, len))
                return (unsigned long) -EINVAL;
        return pgoff << PAGE_SHIFT;
}

/* permit direct mmap, for read, write or exec */
static unsigned memory_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_DIRECT |
                NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

static unsigned zero_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_COPY;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return vma->vm_flags & VM_MAYSHARE;
}
#else

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
        .access = generic_access_phys
#endif
};

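/*
 * Map physical memory straight into the caller's address space. The
 * VMA's pgoff is interpreted as a page frame number, so all range and
 * permission checks are done in terms of PFNs before the range is
 * handed to remap_pfn_range().
 */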
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;
        phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;

        /* Does it even fit in phys_addr_t? */
        if (offset >> PAGE_SHIFT != vma->vm_pgoff)
                return -EINVAL;

        /* It's illegal to wrap around the end of the physical address space. */
        if (offset + (phys_addr_t)size - 1 < offset)
                return -EINVAL;

        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;

        if (!private_mapping_ok(vma))
                return -ENOSYS;

        if (!range_is_allowed(vma->vm_pgoff, size))
                return -EPERM;

        if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
                                                &vma->vm_page_prot))
                return -EINVAL;

        vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                                                 size,
                                                 vma->vm_page_prot);

        vma->vm_ops = &mmap_mem_ops;

        /* Remap-pfn-range will mark the range VM_IO */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            size,
                            vma->vm_page_prot)) {
                return -EAGAIN;
        }
        return 0;
}

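/*
 * /dev/port: byte-wise access to the I/O port space, with f_pos used as
 * the port number; reads and writes stop at port 65535.
 */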
static ssize_t read_port(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char __user *tmp = buf;

        if (!access_ok(buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i), tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char __user *tmp = buf;

        if (!access_ok(buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;

                if (__get_user(c, tmp)) {
                        if (tmp > buf)
                                break;
                        return -EFAULT;
                }
                outb(c, i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

static ssize_t read_null(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return count;
}

static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
{
        return 0;
}

static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
{
        size_t count = iov_iter_count(from);
        iov_iter_advance(from, count);
        return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
                        struct splice_desc *sd)
{
        return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
                                 loff_t *ppos, size_t len, unsigned int flags)
{
        return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static int uring_cmd_null(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
        return 0;
}

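/*
 * Readers of /dev/zero are fed zeroes one page at a time so that
 * signals, rescheduling and IOCB_NOWAIT can be honoured between chunks.
 */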
static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
        size_t written = 0;

        while (iov_iter_count(iter)) {
                size_t chunk = iov_iter_count(iter), n;

                if (chunk > PAGE_SIZE)
                        chunk = PAGE_SIZE;      /* Just for latency reasons */
                n = iov_iter_zero(chunk, iter);
                if (!n && iov_iter_count(iter))
                        return written ? written : -EFAULT;
                written += n;
                if (signal_pending(current))
                        return written ? written : -ERESTARTSYS;
                if (!need_resched())
                        continue;
                if (iocb->ki_flags & IOCB_NOWAIT)
                        return written ? written : -EAGAIN;
                cond_resched();
        }
        return written;
}

static ssize_t read_zero(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        size_t cleared = 0;

        while (count) {
                size_t chunk = min_t(size_t, count, PAGE_SIZE);
                size_t left;

                left = clear_user(buf + cleared, chunk);
                if (unlikely(left)) {
                        cleared += (chunk - left);
                        if (!cleared)
                                return -EFAULT;
                        break;
                }
                cleared += chunk;
                count -= chunk;

                if (signal_pending(current))
                        break;
                cond_resched();
        }

        return cleared;
}

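/*
 * Private mappings of /dev/zero become plain anonymous memory; shared
 * mappings get a shmem object behind them via shmem_zero_setup().
 */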
static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
        return -ENOSYS;
#endif
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        vma_set_anonymous(vma);
        return 0;
}

static unsigned long get_unmapped_area_zero(struct file *file,
                                unsigned long addr, unsigned long len,
                                unsigned long pgoff, unsigned long flags)
{
#ifdef CONFIG_MMU
        if (flags & MAP_SHARED) {
                /*
                 * mmap_zero() will call shmem_zero_setup() to create a file,
                 * so use shmem's get_unmapped_area in case it can be huge;
                 * and pass NULL for file as in mmap.c's get_unmapped_area(),
                 * so as not to confuse shmem with our handle on "/dev/zero".
                 */
                return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
        }

        /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
        return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
#else
        return -ENOSYS;
#endif
}

static ssize_t write_full(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
        loff_t ret;

        inode_lock(file_inode(file));
        switch (orig) {
        case SEEK_CUR:
                offset += file->f_pos;
                fallthrough;
        case SEEK_SET:
                /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
                if ((unsigned long long)offset >= -MAX_ERRNO) {
                        ret = -EOVERFLOW;
                        break;
                }
                file->f_pos = offset;
                ret = file->f_pos;
                force_successful_syscall_return();
                break;
        default:
                ret = -EINVAL;
        }
        inode_unlock(file_inode(file));
        return ret;
}

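/*
 * Opening /dev/port (and, via the open_mem alias below, /dev/mem)
 * requires CAP_SYS_RAWIO and is refused under kernel lockdown.
 */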
static int open_port(struct inode *inode, struct file *filp)
{
        int rc;

        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;

        rc = security_locked_down(LOCKDOWN_DEV_MEM);
        if (rc)
                return rc;

        if (iminor(inode) != DEVMEM_MINOR)
                return 0;

        /*
         * Use a unified address space to have a single point to manage
         * revocations when drivers want to take over a /dev/mem mapped
         * range.
         */
        filp->f_mapping = iomem_get_mapping();

        return 0;
}

#define zero_lseek      null_lseek
#define full_lseek      null_lseek
#define write_zero      write_null
#define write_iter_zero write_iter_null
#define open_mem        open_port

static const struct file_operations __maybe_unused mem_fops = {
        .llseek         = memory_lseek,
        .read           = read_mem,
        .write          = write_mem,
        .mmap           = mmap_mem,
        .open           = open_mem,
#ifndef CONFIG_MMU
        .get_unmapped_area = get_unmapped_area_mem,
        .mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations null_fops = {
        .llseek         = null_lseek,
        .read           = read_null,
        .write          = write_null,
        .read_iter      = read_iter_null,
        .write_iter     = write_iter_null,
        .splice_write   = splice_write_null,
        .uring_cmd      = uring_cmd_null,
};

static const struct file_operations __maybe_unused port_fops = {
        .llseek         = memory_lseek,
        .read           = read_port,
        .write          = write_port,
        .open           = open_port,
};

static const struct file_operations zero_fops = {
        .llseek         = zero_lseek,
        .write          = write_zero,
        .read_iter      = read_iter_zero,
        .read           = read_zero,
        .write_iter     = write_iter_zero,
        .mmap           = mmap_zero,
        .get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
        .mmap_capabilities = zero_mmap_capabilities,
#endif
};

static const struct file_operations full_fops = {
        .llseek         = full_lseek,
        .read_iter      = read_iter_zero,
        .write          = write_full,
};

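/*
 * The index into this table is the minor number under MEM_MAJOR, so
 * e.g. /dev/null is char device 1:3 (mknod /dev/null c 1 3) and
 * /dev/zero is 1:5.
 */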
static const struct memdev {
        const char *name;
        umode_t mode;
        const struct file_operations *fops;
        fmode_t fmode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
         [DEVMEM_MINOR] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#endif
         [3] = { "null", 0666, &null_fops, FMODE_NOWAIT },
#ifdef CONFIG_DEVPORT
         [4] = { "port", 0, &port_fops, 0 },
#endif
         [5] = { "zero", 0666, &zero_fops, FMODE_NOWAIT },
         [7] = { "full", 0666, &full_fops, 0 },
         [8] = { "random", 0666, &random_fops, FMODE_NOWAIT },
         [9] = { "urandom", 0666, &urandom_fops, FMODE_NOWAIT },
#ifdef CONFIG_PRINTK
        [11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
};

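/*
 * Common open routine for all MEM_MAJOR minors: swap in the per-minor
 * file_operations from devlist[] and chain to its own open, if any.
 */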
static int memory_open(struct inode *inode, struct file *filp)
{
        int minor;
        const struct memdev *dev;

        minor = iminor(inode);
        if (minor >= ARRAY_SIZE(devlist))
                return -ENXIO;

        dev = &devlist[minor];
        if (!dev->fops)
                return -ENXIO;

        filp->f_op = dev->fops;
        filp->f_mode |= dev->fmode;

        if (dev->fops->open)
                return dev->fops->open(inode, filp);

        return 0;
}

static const struct file_operations memory_fops = {
        .open = memory_open,
        .llseek = noop_llseek,
};

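/* Report the default mode for the /dev node (e.g. 0666 for /dev/null). */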
static char *mem_devnode(const struct device *dev, umode_t *mode)
{
        if (mode && devlist[MINOR(dev->devt)].mode)
                *mode = devlist[MINOR(dev->devt)].mode;
        return NULL;
}

static struct class *mem_class;

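/*
 * Boot-time setup: register the MEM_MAJOR character device, create the
 * device nodes listed in devlist[], then initialise the TTY layer.
 */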
static int __init chr_dev_init(void)
{
        int minor;

        if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);

        mem_class = class_create(THIS_MODULE, "mem");
        if (IS_ERR(mem_class))
                return PTR_ERR(mem_class);

        mem_class->devnode = mem_devnode;
        for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
                if (!devlist[minor].name)
                        continue;

                /*
                 * Create /dev/port?
                 */
                if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
                        continue;

                device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
                              NULL, devlist[minor].name);
        }

        return tty_init();
}

fs_initcall(chr_dev_init);