1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/drivers/char/mem.c
4  *
5  *  Copyright (C) 1991, 1992  Linus Torvalds
6  *
7  *  Added devfs support.
8  *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
9  *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
10  */
11
12 #include <linux/mm.h>
13 #include <linux/miscdevice.h>
14 #include <linux/slab.h>
15 #include <linux/vmalloc.h>
16 #include <linux/mman.h>
17 #include <linux/random.h>
18 #include <linux/init.h>
19 #include <linux/raw.h>
20 #include <linux/tty.h>
21 #include <linux/capability.h>
22 #include <linux/ptrace.h>
23 #include <linux/device.h>
24 #include <linux/highmem.h>
25 #include <linux/backing-dev.h>
26 #include <linux/shmem_fs.h>
27 #include <linux/splice.h>
28 #include <linux/pfn.h>
29 #include <linux/export.h>
30 #include <linux/io.h>
31 #include <linux/uio.h>
32 #include <linux/uaccess.h>
33 #include <linux/security.h>
34 #include <linux/pseudo_fs.h>
35 #include <uapi/linux/magic.h>
36 #include <linux/mount.h>
37
38 #ifdef CONFIG_IA64
39 # include <linux/efi.h>
40 #endif
41
42 #define DEVMEM_MINOR    1
43 #define DEVPORT_MINOR   4
44
45 static inline unsigned long size_inside_page(unsigned long start,
46                                              unsigned long size)
47 {
48         unsigned long sz;
49
50         sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));
51
52         return min(sz, size);
53 }
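/*
 * Worked example: with PAGE_SIZE == 4096, size_inside_page(0x1ff8, 64)
 * returns 8 (only eight bytes remain in that page), while
 * size_inside_page(0x2000, 64) returns 64 (the request already fits in a
 * single page). The transfer loops below use this to split work on page
 * boundaries.
 */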
54
55 #ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
56 static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
57 {
58         return addr + count <= __pa(high_memory);
59 }
60
61 static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
62 {
63         return 1;
64 }
65 #endif
66
67 #ifdef CONFIG_STRICT_DEVMEM
68 static inline int page_is_allowed(unsigned long pfn)
69 {
70         return devmem_is_allowed(pfn);
71 }
72 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
73 {
74         u64 from = ((u64)pfn) << PAGE_SHIFT;
75         u64 to = from + size;
76         u64 cursor = from;
77
78         while (cursor < to) {
79                 if (!devmem_is_allowed(pfn))
80                         return 0;
81                 cursor += PAGE_SIZE;
82                 pfn++;
83         }
84         return 1;
85 }
86 #else
87 static inline int page_is_allowed(unsigned long pfn)
88 {
89         return 1;
90 }
91 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
92 {
93         return 1;
94 }
95 #endif
96
97 #ifndef unxlate_dev_mem_ptr
98 #define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
99 void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
100 {
101 }
102 #endif
103
104 static inline bool should_stop_iteration(void)
105 {
106         if (need_resched())
107                 cond_resched();
108         return fatal_signal_pending(current);
109 }
110
111 /*
112  * This function reads the *physical* memory. The f_pos points directly to the
113  * memory location.
114  */
115 static ssize_t read_mem(struct file *file, char __user *buf,
116                         size_t count, loff_t *ppos)
117 {
118         phys_addr_t p = *ppos;
119         ssize_t read, sz;
120         void *ptr;
121         char *bounce;
122         int err;
123
124         if (p != *ppos)
125                 return 0;
126
127         if (!valid_phys_addr_range(p, count))
128                 return -EFAULT;
129         read = 0;
130 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
131         /* we don't have page 0 mapped on sparc and m68k.. */
132         if (p < PAGE_SIZE) {
133                 sz = size_inside_page(p, count);
134                 if (sz > 0) {
135                         if (clear_user(buf, sz))
136                                 return -EFAULT;
137                         buf += sz;
138                         p += sz;
139                         count -= sz;
140                         read += sz;
141                 }
142         }
143 #endif
144
145         bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);
146         if (!bounce)
147                 return -ENOMEM;
148
149         while (count > 0) {
150                 unsigned long remaining;
151                 int allowed, probe;
152
153                 sz = size_inside_page(p, count);
154
155                 err = -EPERM;
156                 allowed = page_is_allowed(p >> PAGE_SHIFT);
157                 if (!allowed)
158                         goto failed;
159
160                 err = -EFAULT;
161                 if (allowed == 2) {
162                         /* Show zeros for restricted memory. */
163                         remaining = clear_user(buf, sz);
164                 } else {
165                         /*
166                          * On ia64 if a page has been mapped somewhere as
167                          * uncached, then it must also be accessed uncached
168                          * by the kernel or data corruption may occur.
169                          */
170                         ptr = xlate_dev_mem_ptr(p);
171                         if (!ptr)
172                                 goto failed;
173
174                         probe = copy_from_kernel_nofault(bounce, ptr, sz);
175                         unxlate_dev_mem_ptr(p, ptr);
176                         if (probe)
177                                 goto failed;
178
179                         remaining = copy_to_user(buf, bounce, sz);
180                 }
181
182                 if (remaining)
183                         goto failed;
184
185                 buf += sz;
186                 p += sz;
187                 count -= sz;
188                 read += sz;
189                 if (should_stop_iteration())
190                         break;
191         }
192         kfree(bounce);
193
194         *ppos += read;
195         return read;
196
197 failed:
198         kfree(bounce);
199         return err;
200 }
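/*
 * Minimal userspace sketch of driving the read path above (illustrative
 * only; PHYS_ADDR stands in for a physical address that the kernel's
 * devmem_is_allowed() policy actually permits):
 *
 *     int fd = open("/dev/mem", O_RDONLY);
 *     unsigned char buf[16];
 *
 *     lseek(fd, PHYS_ADDR, SEEK_SET);   -- *ppos becomes the physical address
 *     read(fd, buf, sizeof(buf));       -- copied out via the bounce page
 *     close(fd);
 */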
201
202 static ssize_t write_mem(struct file *file, const char __user *buf,
203                          size_t count, loff_t *ppos)
204 {
205         phys_addr_t p = *ppos;
206         ssize_t written, sz;
207         unsigned long copied;
208         void *ptr;
209
210         if (p != *ppos)
211                 return -EFBIG;
212
213         if (!valid_phys_addr_range(p, count))
214                 return -EFAULT;
215
216         written = 0;
217
218 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
219         /* we don't have page 0 mapped on sparc and m68k.. */
220         if (p < PAGE_SIZE) {
221                 sz = size_inside_page(p, count);
222                 /* Hmm. Do something? */
223                 buf += sz;
224                 p += sz;
225                 count -= sz;
226                 written += sz;
227         }
228 #endif
229
230         while (count > 0) {
231                 int allowed;
232
233                 sz = size_inside_page(p, count);
234
235                 allowed = page_is_allowed(p >> PAGE_SHIFT);
236                 if (!allowed)
237                         return -EPERM;
238
239                 /* Skip actual writing when a page is marked as restricted. */
240                 if (allowed == 1) {
241                         /*
242                          * On ia64 if a page has been mapped somewhere as
243                          * uncached, then it must also be accessed uncached
244                          * by the kernel or data corruption may occur.
245                          */
246                         ptr = xlate_dev_mem_ptr(p);
247                         if (!ptr) {
248                                 if (written)
249                                         break;
250                                 return -EFAULT;
251                         }
252
253                         copied = copy_from_user(ptr, buf, sz);
254                         unxlate_dev_mem_ptr(p, ptr);
255                         if (copied) {
256                                 written += sz - copied;
257                                 if (written)
258                                         break;
259                                 return -EFAULT;
260                         }
261                 }
262
263                 buf += sz;
264                 p += sz;
265                 count -= sz;
266                 written += sz;
267                 if (should_stop_iteration())
268                         break;
269         }
270
271         *ppos += written;
272         return written;
273 }
274
275 int __weak phys_mem_access_prot_allowed(struct file *file,
276         unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
277 {
278         return 1;
279 }
280
281 #ifndef __HAVE_PHYS_MEM_ACCESS_PROT
282
283 /*
284  * Architectures vary in how they handle caching for addresses
285  * outside of main memory.
286  *
287  */
288 #ifdef pgprot_noncached
289 static int uncached_access(struct file *file, phys_addr_t addr)
290 {
291 #if defined(CONFIG_IA64)
292         /*
293          * On ia64, we ignore O_DSYNC because we cannot tolerate memory
294          * attribute aliases.
295          */
296         return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
297 #else
298         /*
299          * Accessing memory above the highest address the kernel knows
300          * about, or through a file pointer that was marked O_DSYNC, will
301          * be done non-cached.
302          */
303         if (file->f_flags & O_DSYNC)
304                 return 1;
305         return addr >= __pa(high_memory);
306 #endif
307 }
308 #endif
309
310 static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
311                                      unsigned long size, pgprot_t vma_prot)
312 {
313 #ifdef pgprot_noncached
314         phys_addr_t offset = pfn << PAGE_SHIFT;
315
316         if (uncached_access(file, offset))
317                 return pgprot_noncached(vma_prot);
318 #endif
319         return vma_prot;
320 }
321 #endif
322
323 #ifndef CONFIG_MMU
324 static unsigned long get_unmapped_area_mem(struct file *file,
325                                            unsigned long addr,
326                                            unsigned long len,
327                                            unsigned long pgoff,
328                                            unsigned long flags)
329 {
330         if (!valid_mmap_phys_addr_range(pgoff, len))
331                 return (unsigned long) -EINVAL;
332         return pgoff << PAGE_SHIFT;
333 }
334
335 /* permit direct mmap, for read, write or exec */
336 static unsigned memory_mmap_capabilities(struct file *file)
337 {
338         return NOMMU_MAP_DIRECT |
339                 NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
340 }
341
342 static unsigned zero_mmap_capabilities(struct file *file)
343 {
344         return NOMMU_MAP_COPY;
345 }
346
347 /* can't do an in-place private mapping if there's no MMU */
348 static inline int private_mapping_ok(struct vm_area_struct *vma)
349 {
350         return vma->vm_flags & VM_MAYSHARE;
351 }
352 #else
353
354 static inline int private_mapping_ok(struct vm_area_struct *vma)
355 {
356         return 1;
357 }
358 #endif
359
360 static const struct vm_operations_struct mmap_mem_ops = {
361 #ifdef CONFIG_HAVE_IOREMAP_PROT
362         .access = generic_access_phys
363 #endif
364 };
365
366 static int mmap_mem(struct file *file, struct vm_area_struct *vma)
367 {
368         size_t size = vma->vm_end - vma->vm_start;
369         phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
370
371         /* Does it even fit in phys_addr_t? */
372         if (offset >> PAGE_SHIFT != vma->vm_pgoff)
373                 return -EINVAL;
374
375         /* It's illegal to wrap around the end of the physical address space. */
376         if (offset + (phys_addr_t)size - 1 < offset)
377                 return -EINVAL;
378
379         if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
380                 return -EINVAL;
381
382         if (!private_mapping_ok(vma))
383                 return -ENOSYS;
384
385         if (!range_is_allowed(vma->vm_pgoff, size))
386                 return -EPERM;
387
388         if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
389                                                 &vma->vm_page_prot))
390                 return -EINVAL;
391
392         vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
393                                                  size,
394                                                  vma->vm_page_prot);
395
396         vma->vm_ops = &mmap_mem_ops;
397
398         /* Remap-pfn-range will mark the range VM_IO */
399         if (remap_pfn_range(vma,
400                             vma->vm_start,
401                             vma->vm_pgoff,
402                             size,
403                             vma->vm_page_prot)) {
404                 return -EAGAIN;
405         }
406         return 0;
407 }
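/*
 * Minimal userspace sketch of the mmap path above (illustrative only;
 * PHYS_ADDR is a placeholder and must be page-aligned, since the file
 * offset becomes vm_pgoff):
 *
 *     int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *     void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                    fd, PHYS_ADDR);
 *
 * O_SYNC implies O_DSYNC, so uncached_access() will request a non-cached
 * mapping on configurations that take that branch.
 */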
408
409 static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
410 {
411         unsigned long pfn;
412
413         /* Turn a kernel-virtual address into a physical page frame */
414         pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
415
416         /*
417          * RED-PEN: on some architectures there is more mapped memory than
418          * available in mem_map which pfn_valid checks for. Perhaps should add a
419          * new macro here.
420          *
421          * RED-PEN: vmalloc is not supported right now.
422          */
423         if (!pfn_valid(pfn))
424                 return -EIO;
425
426         vma->vm_pgoff = pfn;
427         return mmap_mem(file, vma);
428 }
429
430 /*
431  * This function reads the *virtual* memory as seen by the kernel.
432  */
433 static ssize_t read_kmem(struct file *file, char __user *buf,
434                          size_t count, loff_t *ppos)
435 {
436         unsigned long p = *ppos;
437         ssize_t low_count, read, sz;
438         char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
439         int err = 0;
440
441         read = 0;
442         if (p < (unsigned long) high_memory) {
443                 low_count = count;
444                 if (count > (unsigned long)high_memory - p)
445                         low_count = (unsigned long)high_memory - p;
446
447 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
448                 /* we don't have page 0 mapped on sparc and m68k.. */
449                 if (p < PAGE_SIZE && low_count > 0) {
450                         sz = size_inside_page(p, low_count);
451                         if (clear_user(buf, sz))
452                                 return -EFAULT;
453                         buf += sz;
454                         p += sz;
455                         read += sz;
456                         low_count -= sz;
457                         count -= sz;
458                 }
459 #endif
460                 while (low_count > 0) {
461                         sz = size_inside_page(p, low_count);
462
463                         /*
464                          * On ia64 if a page has been mapped somewhere as
465                          * uncached, then it must also be accessed uncached
466                          * by the kernel or data corruption may occur
467                          */
468                         kbuf = xlate_dev_kmem_ptr((void *)p);
469                         if (!virt_addr_valid(kbuf))
470                                 return -ENXIO;
471
472                         if (copy_to_user(buf, kbuf, sz))
473                                 return -EFAULT;
474                         buf += sz;
475                         p += sz;
476                         read += sz;
477                         low_count -= sz;
478                         count -= sz;
479                         if (should_stop_iteration()) {
480                                 count = 0;
481                                 break;
482                         }
483                 }
484         }
485
486         if (count > 0) {
487                 kbuf = (char *)__get_free_page(GFP_KERNEL);
488                 if (!kbuf)
489                         return -ENOMEM;
490                 while (count > 0) {
491                         sz = size_inside_page(p, count);
492                         if (!is_vmalloc_or_module_addr((void *)p)) {
493                                 err = -ENXIO;
494                                 break;
495                         }
496                         sz = vread(kbuf, (char *)p, sz);
497                         if (!sz)
498                                 break;
499                         if (copy_to_user(buf, kbuf, sz)) {
500                                 err = -EFAULT;
501                                 break;
502                         }
503                         count -= sz;
504                         buf += sz;
505                         read += sz;
506                         p += sz;
507                         if (should_stop_iteration())
508                                 break;
509                 }
510                 free_page((unsigned long)kbuf);
511         }
512         *ppos = p;
513         return read ? read : err;
514 }
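/*
 * Note on the loop structure above: addresses below high_memory are in the
 * kernel's direct mapping and are copied straight out through
 * xlate_dev_kmem_ptr(); anything above that is expected to be a
 * vmalloc/module address and is pulled page-by-page through vread() into a
 * bounce page first.
 */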
515
516
517 static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
518                                 size_t count, loff_t *ppos)
519 {
520         ssize_t written, sz;
521         unsigned long copied;
522
523         written = 0;
524 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
525         /* we don't have page 0 mapped on sparc and m68k.. */
526         if (p < PAGE_SIZE) {
527                 sz = size_inside_page(p, count);
528                 /* Hmm. Do something? */
529                 buf += sz;
530                 p += sz;
531                 count -= sz;
532                 written += sz;
533         }
534 #endif
535
536         while (count > 0) {
537                 void *ptr;
538
539                 sz = size_inside_page(p, count);
540
541                 /*
542                  * On ia64 if a page has been mapped somewhere as uncached, then
543                  * it must also be accessed uncached by the kernel or data
544                  * corruption may occur.
545                  */
546                 ptr = xlate_dev_kmem_ptr((void *)p);
547                 if (!virt_addr_valid(ptr))
548                         return -ENXIO;
549
550                 copied = copy_from_user(ptr, buf, sz);
551                 if (copied) {
552                         written += sz - copied;
553                         if (written)
554                                 break;
555                         return -EFAULT;
556                 }
557                 buf += sz;
558                 p += sz;
559                 count -= sz;
560                 written += sz;
561                 if (should_stop_iteration())
562                         break;
563         }
564
565         *ppos += written;
566         return written;
567 }
568
569 /*
570  * This function writes to the *virtual* memory as seen by the kernel.
571  */
572 static ssize_t write_kmem(struct file *file, const char __user *buf,
573                           size_t count, loff_t *ppos)
574 {
575         unsigned long p = *ppos;
576         ssize_t wrote = 0;
577         ssize_t virtr = 0;
578         char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
579         int err = 0;
580
581         if (p < (unsigned long) high_memory) {
582                 unsigned long to_write = min_t(unsigned long, count,
583                                                (unsigned long)high_memory - p);
584                 wrote = do_write_kmem(p, buf, to_write, ppos);
585                 if (wrote != to_write)
586                         return wrote;
587                 p += wrote;
588                 buf += wrote;
589                 count -= wrote;
590         }
591
592         if (count > 0) {
593                 kbuf = (char *)__get_free_page(GFP_KERNEL);
594                 if (!kbuf)
595                         return wrote ? wrote : -ENOMEM;
596                 while (count > 0) {
597                         unsigned long sz = size_inside_page(p, count);
598                         unsigned long n;
599
600                         if (!is_vmalloc_or_module_addr((void *)p)) {
601                                 err = -ENXIO;
602                                 break;
603                         }
604                         n = copy_from_user(kbuf, buf, sz);
605                         if (n) {
606                                 err = -EFAULT;
607                                 break;
608                         }
609                         vwrite(kbuf, (char *)p, sz);
610                         count -= sz;
611                         buf += sz;
612                         virtr += sz;
613                         p += sz;
614                         if (should_stop_iteration())
615                                 break;
616                 }
617                 free_page((unsigned long)kbuf);
618         }
619
620         *ppos = p;
621         return virtr + wrote ? : err;
622 }
623
624 static ssize_t read_port(struct file *file, char __user *buf,
625                          size_t count, loff_t *ppos)
626 {
627         unsigned long i = *ppos;
628         char __user *tmp = buf;
629
630         if (!access_ok(buf, count))
631                 return -EFAULT;
632         while (count-- > 0 && i < 65536) {
633                 if (__put_user(inb(i), tmp) < 0)
634                         return -EFAULT;
635                 i++;
636                 tmp++;
637         }
638         *ppos = i;
639         return tmp-buf;
640 }
641
642 static ssize_t write_port(struct file *file, const char __user *buf,
643                           size_t count, loff_t *ppos)
644 {
645         unsigned long i = *ppos;
646         const char __user *tmp = buf;
647
648         if (!access_ok(buf, count))
649                 return -EFAULT;
650         while (count-- > 0 && i < 65536) {
651                 char c;
652
653                 if (__get_user(c, tmp)) {
654                         if (tmp > buf)
655                                 break;
656                         return -EFAULT;
657                 }
658                 outb(c, i);
659                 i++;
660                 tmp++;
661         }
662         *ppos = i;
663         return tmp-buf;
664 }
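/*
 * Illustrative sketch of /dev/port usage (requires CAP_SYS_RAWIO; the
 * CMOS/RTC index/data ports 0x70/0x71 are used purely as an example):
 *
 *     int fd = open("/dev/port", O_RDWR);
 *     unsigned char idx = 0x00, sec;
 *
 *     lseek(fd, 0x70, SEEK_SET);
 *     write(fd, &idx, 1);               -- outb(0x00, 0x70)
 *     lseek(fd, 0x71, SEEK_SET);
 *     read(fd, &sec, 1);                -- inb(0x71)
 */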
665
666 static ssize_t read_null(struct file *file, char __user *buf,
667                          size_t count, loff_t *ppos)
668 {
669         return 0;
670 }
671
672 static ssize_t write_null(struct file *file, const char __user *buf,
673                           size_t count, loff_t *ppos)
674 {
675         return count;
676 }
677
678 static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
679 {
680         return 0;
681 }
682
683 static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
684 {
685         size_t count = iov_iter_count(from);
686         iov_iter_advance(from, count);
687         return count;
688 }
689
690 static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
691                         struct splice_desc *sd)
692 {
693         return sd->len;
694 }
695
696 static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
697                                  loff_t *ppos, size_t len, unsigned int flags)
698 {
699         return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
700 }
701
702 static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
703 {
704         size_t written = 0;
705
706         while (iov_iter_count(iter)) {
707                 size_t chunk = iov_iter_count(iter), n;
708
709                 if (chunk > PAGE_SIZE)
710                         chunk = PAGE_SIZE;      /* Just for latency reasons */
711                 n = iov_iter_zero(chunk, iter);
712                 if (!n && iov_iter_count(iter))
713                         return written ? written : -EFAULT;
714                 written += n;
715                 if (signal_pending(current))
716                         return written ? written : -ERESTARTSYS;
717                 cond_resched();
718         }
719         return written;
720 }
721
722 static ssize_t read_zero(struct file *file, char __user *buf,
723                          size_t count, loff_t *ppos)
724 {
725         size_t cleared = 0;
726
727         while (count) {
728                 size_t chunk = min_t(size_t, count, PAGE_SIZE);
729                 size_t left;
730
731                 left = clear_user(buf + cleared, chunk);
732                 if (unlikely(left)) {
733                         cleared += (chunk - left);
734                         if (!cleared)
735                                 return -EFAULT;
736                         break;
737                 }
738                 cleared += chunk;
739                 count -= chunk;
740
741                 if (signal_pending(current))
742                         break;
743                 cond_resched();
744         }
745
746         return cleared;
747 }
748
749 static int mmap_zero(struct file *file, struct vm_area_struct *vma)
750 {
751 #ifndef CONFIG_MMU
752         return -ENOSYS;
753 #endif
754         if (vma->vm_flags & VM_SHARED)
755                 return shmem_zero_setup(vma);
756         vma_set_anonymous(vma);
757         return 0;
758 }
759
760 static unsigned long get_unmapped_area_zero(struct file *file,
761                                 unsigned long addr, unsigned long len,
762                                 unsigned long pgoff, unsigned long flags)
763 {
764 #ifdef CONFIG_MMU
765         if (flags & MAP_SHARED) {
766                 /*
767                  * mmap_zero() will call shmem_zero_setup() to create a file,
768                  * so use shmem's get_unmapped_area in case it can be huge;
769                  * and pass NULL for file as in mmap.c's get_unmapped_area(),
770                  * so as not to confuse shmem with our handle on "/dev/zero".
771                  */
772                 return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
773         }
774
775         /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
776         return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
777 #else
778         return -ENOSYS;
779 #endif
780 }
781
782 static ssize_t write_full(struct file *file, const char __user *buf,
783                           size_t count, loff_t *ppos)
784 {
785         return -ENOSPC;
786 }
787
788 /*
789  * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
790  * can fopen() both devices with "a" now.  This was previously impossible.
791  * -- SRB.
792  */
793 static loff_t null_lseek(struct file *file, loff_t offset, int orig)
794 {
795         return file->f_pos = 0;
796 }
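/*
 * Illustrative consequence of pinning the position to zero: an append-mode
 * open no longer fails, e.g.
 *
 *     FILE *f = fopen("/dev/null", "a");   -- succeeds, f_pos stays 0
 */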
797
798 /*
799  * The memory devices use the full 32/64 bits of the offset, and so we cannot
800  * check against negative addresses: they are ok. The return value is weird,
801  * though, in that case (0).
802  *
803  * also note that seeking relative to the "end of file" isn't supported:
804  * it has no meaning, so it returns -EINVAL.
805  */
806 static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
807 {
808         loff_t ret;
809
810         inode_lock(file_inode(file));
811         switch (orig) {
812         case SEEK_CUR:
813                 offset += file->f_pos;
814                 fallthrough;
815         case SEEK_SET:
816                 /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
817                 if ((unsigned long long)offset >= -MAX_ERRNO) {
818                         ret = -EOVERFLOW;
819                         break;
820                 }
821                 file->f_pos = offset;
822                 ret = file->f_pos;
823                 force_successful_syscall_return();
824                 break;
825         default:
826                 ret = -EINVAL;
827         }
828         inode_unlock(file_inode(file));
829         return ret;
830 }
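/*
 * Illustrative semantics on /dev/mem (offsets are physical addresses):
 *
 *     lseek(fd, 0x100000, SEEK_SET);    -- returns 0x100000
 *     lseek(fd, 0x1000,   SEEK_CUR);    -- returns 0x101000
 *     lseek(fd, 0,        SEEK_END);    -- fails with EINVAL
 *
 * Offsets that collide with the -MAX_ERRNO error range are rejected with
 * EOVERFLOW so a successful position cannot be mistaken for an error code.
 */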
831
832 static struct inode *devmem_inode;
833
834 #ifdef CONFIG_IO_STRICT_DEVMEM
835 void revoke_devmem(struct resource *res)
836 {
837         /* pairs with smp_store_release() in devmem_init_inode() */
838         struct inode *inode = smp_load_acquire(&devmem_inode);
839
840         /*
841          * Check that the initialization has completed. Losing the race
842          * is ok because it means drivers are claiming resources before
843          * the fs_initcall level of init, which prevents /dev/mem from
844          * establishing mappings in the first place.
845          */
846         if (!inode)
847                 return;
848
849         /*
850          * The expectation is that the driver has successfully marked
851          * the resource busy by this point, so devmem_is_allowed()
852          * should start returning false; however, for performance reasons
853          * this check does not iterate the entire resource range.
854          */
855         if (devmem_is_allowed(PHYS_PFN(res->start)) &&
856             devmem_is_allowed(PHYS_PFN(res->end))) {
857                 /*
858                  * *cringe* iomem=relaxed says "go ahead, what's the
859                  * worst that can happen?"
860                  */
861                 return;
862         }
863
864         unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
865 }
866 #endif
867
868 static int open_port(struct inode *inode, struct file *filp)
869 {
870         int rc;
871
872         if (!capable(CAP_SYS_RAWIO))
873                 return -EPERM;
874
875         rc = security_locked_down(LOCKDOWN_DEV_MEM);
876         if (rc)
877                 return rc;
878
879         if (iminor(inode) != DEVMEM_MINOR)
880                 return 0;
881
882         /*
883          * Use a unified address space to have a single point to manage
884          * revocations when drivers want to take over a /dev/mem mapped
885          * range.
886          */
887         inode->i_mapping = devmem_inode->i_mapping;
888         filp->f_mapping = inode->i_mapping;
889
890         return 0;
891 }
892
893 #define zero_lseek      null_lseek
894 #define full_lseek      null_lseek
895 #define write_zero      write_null
896 #define write_iter_zero write_iter_null
897 #define open_mem        open_port
898 #define open_kmem       open_mem
899
900 static const struct file_operations __maybe_unused mem_fops = {
901         .llseek         = memory_lseek,
902         .read           = read_mem,
903         .write          = write_mem,
904         .mmap           = mmap_mem,
905         .open           = open_mem,
906 #ifndef CONFIG_MMU
907         .get_unmapped_area = get_unmapped_area_mem,
908         .mmap_capabilities = memory_mmap_capabilities,
909 #endif
910 };
911
912 static const struct file_operations __maybe_unused kmem_fops = {
913         .llseek         = memory_lseek,
914         .read           = read_kmem,
915         .write          = write_kmem,
916         .mmap           = mmap_kmem,
917         .open           = open_kmem,
918 #ifndef CONFIG_MMU
919         .get_unmapped_area = get_unmapped_area_mem,
920         .mmap_capabilities = memory_mmap_capabilities,
921 #endif
922 };
923
924 static const struct file_operations null_fops = {
925         .llseek         = null_lseek,
926         .read           = read_null,
927         .write          = write_null,
928         .read_iter      = read_iter_null,
929         .write_iter     = write_iter_null,
930         .splice_write   = splice_write_null,
931 };
932
933 static const struct file_operations __maybe_unused port_fops = {
934         .llseek         = memory_lseek,
935         .read           = read_port,
936         .write          = write_port,
937         .open           = open_port,
938 };
939
940 static const struct file_operations zero_fops = {
941         .llseek         = zero_lseek,
942         .write          = write_zero,
943         .read_iter      = read_iter_zero,
944         .read           = read_zero,
945         .write_iter     = write_iter_zero,
946         .mmap           = mmap_zero,
947         .get_unmapped_area = get_unmapped_area_zero,
948 #ifndef CONFIG_MMU
949         .mmap_capabilities = zero_mmap_capabilities,
950 #endif
951 };
952
953 static const struct file_operations full_fops = {
954         .llseek         = full_lseek,
955         .read_iter      = read_iter_zero,
956         .write          = write_full,
957 };
958
959 static const struct memdev {
960         const char *name;
961         umode_t mode;
962         const struct file_operations *fops;
963         fmode_t fmode;
964 } devlist[] = {
965 #ifdef CONFIG_DEVMEM
966          [DEVMEM_MINOR] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
967 #endif
968 #ifdef CONFIG_DEVKMEM
969          [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
970 #endif
971          [3] = { "null", 0666, &null_fops, 0 },
972 #ifdef CONFIG_DEVPORT
973          [4] = { "port", 0, &port_fops, 0 },
974 #endif
975          [5] = { "zero", 0666, &zero_fops, 0 },
976          [7] = { "full", 0666, &full_fops, 0 },
977          [8] = { "random", 0666, &random_fops, 0 },
978          [9] = { "urandom", 0666, &urandom_fops, 0 },
979 #ifdef CONFIG_PRINTK
980         [11] = { "kmsg", 0644, &kmsg_fops, 0 },
981 #endif
982 };
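/*
 * The minors above correspond to the classic character device nodes on
 * major MEM_MAJOR (1); devtmpfs/udev normally create them, but by hand it
 * would look like:
 *
 *     mknod /dev/mem     c 1 1
 *     mknod /dev/null    c 1 3
 *     mknod /dev/zero    c 1 5
 *     mknod /dev/full    c 1 7
 *     mknod /dev/urandom c 1 9
 */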
983
984 static int memory_open(struct inode *inode, struct file *filp)
985 {
986         int minor;
987         const struct memdev *dev;
988
989         minor = iminor(inode);
990         if (minor >= ARRAY_SIZE(devlist))
991                 return -ENXIO;
992
993         dev = &devlist[minor];
994         if (!dev->fops)
995                 return -ENXIO;
996
997         filp->f_op = dev->fops;
998         filp->f_mode |= dev->fmode;
999
1000         if (dev->fops->open)
1001                 return dev->fops->open(inode, filp);
1002
1003         return 0;
1004 }
1005
1006 static const struct file_operations memory_fops = {
1007         .open = memory_open,
1008         .llseek = noop_llseek,
1009 };
1010
1011 static char *mem_devnode(struct device *dev, umode_t *mode)
1012 {
1013         if (mode && devlist[MINOR(dev->devt)].mode)
1014                 *mode = devlist[MINOR(dev->devt)].mode;
1015         return NULL;
1016 }
1017
1018 static struct class *mem_class;
1019
1020 static int devmem_fs_init_fs_context(struct fs_context *fc)
1021 {
1022         return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM;
1023 }
1024
1025 static struct file_system_type devmem_fs_type = {
1026         .name           = "devmem",
1027         .owner          = THIS_MODULE,
1028         .init_fs_context = devmem_fs_init_fs_context,
1029         .kill_sb        = kill_anon_super,
1030 };
1031
1032 static int devmem_init_inode(void)
1033 {
1034         static struct vfsmount *devmem_vfs_mount;
1035         static int devmem_fs_cnt;
1036         struct inode *inode;
1037         int rc;
1038
1039         rc = simple_pin_fs(&devmem_fs_type, &devmem_vfs_mount, &devmem_fs_cnt);
1040         if (rc < 0) {
1041                 pr_err("Cannot mount /dev/mem pseudo filesystem: %d\n", rc);
1042                 return rc;
1043         }
1044
1045         inode = alloc_anon_inode(devmem_vfs_mount->mnt_sb);
1046         if (IS_ERR(inode)) {
1047                 rc = PTR_ERR(inode);
1048                 pr_err("Cannot allocate inode for /dev/mem: %d\n", rc);
1049                 simple_release_fs(&devmem_vfs_mount, &devmem_fs_cnt);
1050                 return rc;
1051         }
1052
1053         /*
1054          * Publish /dev/mem initialized.
1055          * Pairs with smp_load_acquire() in revoke_devmem().
1056          */
1057         smp_store_release(&devmem_inode, inode);
1058
1059         return 0;
1060 }
1061
1062 static int __init chr_dev_init(void)
1063 {
1064         int minor;
1065
1066         if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
1067                 printk("unable to get major %d for memory devs\n", MEM_MAJOR);
1068
1069         mem_class = class_create(THIS_MODULE, "mem");
1070         if (IS_ERR(mem_class))
1071                 return PTR_ERR(mem_class);
1072
1073         mem_class->devnode = mem_devnode;
1074         for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
1075                 if (!devlist[minor].name)
1076                         continue;
1077
1078                 /*
1079                  * Create /dev/port?
1080                  */
1081                 if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
1082                         continue;
1083                 if ((minor == DEVMEM_MINOR) && devmem_init_inode() != 0)
1084                         continue;
1085
1086                 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
1087                               NULL, devlist[minor].name);
1088         }
1089
1090         return tty_init();
1091 }
1092
1093 fs_initcall(chr_dev_init);