// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/security.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVMEM_MINOR    1
#define DEVPORT_MINOR   4

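/*
 * Return the number of bytes that can be touched from @start before
 * crossing a page boundary, clamped to @size; the read/write loops
 * below use it to split arbitrarily large requests into per-page
 * chunks.
 */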
static inline unsigned long size_inside_page(unsigned long start,
                                             unsigned long size)
{
        unsigned long sz;

        sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

        return min(sz, size);
}

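/*
 * Default sanity checks for architectures that do not supply their
 * own: physical read/write offsets must stay below high_memory, and
 * any pfn range may be mmapped.
 */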
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
        return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return 1;
}
#endif

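/*
 * With CONFIG_STRICT_DEVMEM the architecture decides per page whether
 * /dev/mem may touch a pfn; devmem_is_allowed() may also return 2,
 * which the callers below treat as "allowed, but show zeroes".
 * Without it, every page is fair game.
 */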
#ifdef CONFIG_STRICT_DEVMEM
static inline int page_is_allowed(unsigned long pfn)
{
        return devmem_is_allowed(pfn);
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn))
                        return 0;
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#else
static inline int page_is_allowed(unsigned long pfn)
{
        return 1;
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#endif

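/*
 * Architectures whose xlate_dev_mem_ptr() sets up a temporary mapping
 * override this to tear it down again; the default is a no-op.
 */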
#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif

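/*
 * The transfer loops below work page by page; between pages we give
 * up the CPU when needed and stop early if the task has a fatal
 * signal pending, so a huge read or write cannot wedge the caller.
 */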
static inline bool should_stop_iteration(void)
{
        if (need_resched())
                cond_resched();
        return fatal_signal_pending(current);
}

/*
 * This function reads the *physical* memory. The f_pos points directly
 * to the memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t read, sz;
        void *ptr;
        char *bounce;
        int err;

        if (p != *ppos)
                return 0;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;
        read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif

        bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!bounce)
                return -ENOMEM;

        while (count > 0) {
                unsigned long remaining;
                int allowed, probe;

                sz = size_inside_page(p, count);

                err = -EPERM;
                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        goto failed;

                err = -EFAULT;
                if (allowed == 2) {
                        /* Show zeros for restricted memory. */
                        remaining = clear_user(buf, sz);
                } else {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr)
                                goto failed;

                        probe = copy_from_kernel_nofault(bounce, ptr, sz);
                        unxlate_dev_mem_ptr(p, ptr);
                        if (probe)
                                goto failed;

                        remaining = copy_to_user(buf, bounce, sz);
                }

                if (remaining)
                        goto failed;

                buf += sz;
                p += sz;
                count -= sz;
                read += sz;
                if (should_stop_iteration())
                        break;
        }
        kfree(bounce);

        *ppos += read;
        return read;

failed:
        kfree(bounce);
        return err;
}
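
/*
 * Example (hypothetical, run as root and only where the range is
 * permitted by CONFIG_STRICT_DEVMEM): dumping the legacy video BIOS
 * region goes through the handler above:
 *
 *     dd if=/dev/mem bs=64k skip=12 count=1 | xxd | head
 *
 * sets f_pos to 0xC0000 (12 * 64 KiB) and the loop copies the range
 * out page by page via the bounce buffer.
 */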
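/*
 * Mirror of read_mem() for writes: the same page-by-page walk and
 * STRICT_DEVMEM policy, except that restricted pages are silently
 * skipped instead of being read back as zeroes.
 */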
static ssize_t write_mem(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t written, sz;
        unsigned long copied;
        void *ptr;

        if (p != *ppos)
                return -EFBIG;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;

        written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                int allowed;

                sz = size_inside_page(p, count);

                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        return -EPERM;

                /* Skip actual writing when a page is marked as restricted. */
                if (allowed == 1) {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr) {
                                if (written)
                                        break;
                                return -EFAULT;
                        }

                        copied = copy_from_user(ptr, buf, sz);
                        unxlate_dev_mem_ptr(p, ptr);
                        if (copied) {
                                written += sz - copied;
                                if (written)
                                        break;
                                return -EFAULT;
                        }
                }

                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
                if (should_stop_iteration())
                        break;
        }

        *ppos += written;
        return written;
}

int __weak phys_mem_access_prot_allowed(struct file *file,
        unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
        return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_DSYNC because we cannot tolerate memory
         * attribute aliases.
         */
        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#else
        /*
         * Accessing memory above the top of memory the kernel knows
         * about, or through a file pointer that was marked O_DSYNC,
         * will be done non-cached.
         */
        if (file->f_flags & O_DSYNC)
                return 1;
        return addr >= __pa(high_memory);
#endif
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
        phys_addr_t offset = pfn << PAGE_SHIFT;

        if (uncached_access(file, offset))
                return pgprot_noncached(vma_prot);
#endif
        return vma_prot;
}
#endif
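
/*
 * Example (hypothetical): a process that needs an uncached view of a
 * device aperture can open("/dev/mem", O_RDWR | O_DSYNC) before
 * calling mmap(); uncached_access() above then makes
 * phys_mem_access_prot() pick pgprot_noncached() for the mapping.
 */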

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
                                           unsigned long addr,
                                           unsigned long len,
                                           unsigned long pgoff,
                                           unsigned long flags)
{
        if (!valid_mmap_phys_addr_range(pgoff, len))
                return (unsigned long) -EINVAL;
        return pgoff << PAGE_SHIFT;
}

/* permit direct mmap, for read, write or exec */
static unsigned memory_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_DIRECT |
                NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

static unsigned zero_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_COPY;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return vma->vm_flags & VM_MAYSHARE;
}
#else

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
        .access = generic_access_phys
#endif
};

static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;
        phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;

        /* Does it even fit in phys_addr_t? */
        if (offset >> PAGE_SHIFT != vma->vm_pgoff)
                return -EINVAL;

        /* It's illegal to wrap around the end of the physical address space. */
        if (offset + (phys_addr_t)size - 1 < offset)
                return -EINVAL;

        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;

        if (!private_mapping_ok(vma))
                return -ENOSYS;

        if (!range_is_allowed(vma->vm_pgoff, size))
                return -EPERM;

        if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
                                                &vma->vm_page_prot))
                return -EINVAL;

        vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                                                 size,
                                                 vma->vm_page_prot);

        vma->vm_ops = &mmap_mem_ops;

        /* Remap-pfn-range will mark the range VM_IO */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            size,
                            vma->vm_page_prot)) {
                return -EAGAIN;
        }
        return 0;
}
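
/*
 * Example (hypothetical, PHYS_ADDR is a placeholder): mapping one page
 * of physical memory from userspace arrives here through the mem_fops
 * .mmap hook:
 *
 *     int fd = open("/dev/mem", O_RDWR);
 *     void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *                    MAP_SHARED, fd, PHYS_ADDR);
 *
 * The page-aligned offset becomes vma->vm_pgoff (PHYS_ADDR >> PAGE_SHIFT).
 */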
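/*
 * For /dev/kmem the mmap offset is a *kernel virtual* address; turn
 * it into a physical pfn and reuse the /dev/mem path above.
 */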
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
        unsigned long pfn;

        /* Turn a kernel-virtual address into a physical page frame */
        pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

        /*
         * RED-PEN: on some architectures there is more mapped memory than
         * available in mem_map which pfn_valid checks for. Perhaps should add a
         * new macro here.
         *
         * RED-PEN: vmalloc is not supported right now.
         */
        if (!pfn_valid(pfn))
                return -EIO;

        vma->vm_pgoff = pfn;
        return mmap_mem(file, vma);
}

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t low_count, read, sz;
        char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
        int err = 0;

        read = 0;
        if (p < (unsigned long) high_memory) {
                low_count = count;
                if (count > (unsigned long)high_memory - p)
                        low_count = (unsigned long)high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
                /* we don't have page 0 mapped on sparc and m68k.. */
                if (p < PAGE_SIZE && low_count > 0) {
                        sz = size_inside_page(p, low_count);
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
#endif
                while (low_count > 0) {
                        sz = size_inside_page(p, low_count);

                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur
                         */
                        kbuf = xlate_dev_kmem_ptr((void *)p);
                        if (!virt_addr_valid(kbuf))
                                return -ENXIO;

                        if (copy_to_user(buf, kbuf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                        if (should_stop_iteration()) {
                                count = 0;
                                break;
                        }
                }
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        sz = size_inside_page(p, count);
                        if (!is_vmalloc_or_module_addr((void *)p)) {
                                err = -ENXIO;
                                break;
                        }
                        sz = vread(kbuf, (char *)p, sz);
                        if (!sz)
                                break;
                        if (copy_to_user(buf, kbuf, sz)) {
                                err = -EFAULT;
                                break;
                        }
                        count -= sz;
                        buf += sz;
                        read += sz;
                        p += sz;
                        if (should_stop_iteration())
                                break;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return read ? read : err;
}


static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
                                size_t count, loff_t *ppos)
{
        ssize_t written, sz;
        unsigned long copied;

        written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                void *ptr;

                sz = size_inside_page(p, count);

                /*
                 * On ia64 if a page has been mapped somewhere as uncached, then
                 * it must also be accessed uncached by the kernel or data
                 * corruption may occur.
                 */
                ptr = xlate_dev_kmem_ptr((void *)p);
                if (!virt_addr_valid(ptr))
                        return -ENXIO;

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
                if (should_stop_iteration())
                        break;
        }

        *ppos += written;
        return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t wrote = 0;
        ssize_t virtr = 0;
        char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
        int err = 0;

        if (p < (unsigned long) high_memory) {
                unsigned long to_write = min_t(unsigned long, count,
                                               (unsigned long)high_memory - p);
                wrote = do_write_kmem(p, buf, to_write, ppos);
                if (wrote != to_write)
                        return wrote;
                p += wrote;
                buf += wrote;
                count -= wrote;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return wrote ? wrote : -ENOMEM;
                while (count > 0) {
                        unsigned long sz = size_inside_page(p, count);
                        unsigned long n;

                        if (!is_vmalloc_or_module_addr((void *)p)) {
                                err = -ENXIO;
                                break;
                        }
                        n = copy_from_user(kbuf, buf, sz);
                        if (n) {
                                err = -EFAULT;
                                break;
                        }
                        vwrite(kbuf, (char *)p, sz);
                        count -= sz;
                        buf += sz;
                        virtr += sz;
                        p += sz;
                        if (should_stop_iteration())
                                break;
                }
                free_page((unsigned long)kbuf);
        }

        *ppos = p;
        return virtr + wrote ? : err;
}
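/*
 * /dev/port gives byte-wise access to the x86-style I/O port space:
 * f_pos is the port number, and every byte transferred is a single
 * inb()/outb().
 */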
static ssize_t read_port(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char __user *tmp = buf;

        if (!access_ok(buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i), tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char __user *tmp = buf;

        if (!access_ok(buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;

                if (__get_user(c, tmp)) {
                        if (tmp > buf)
                                break;
                        return -EFAULT;
                }
                outb(c, i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

static ssize_t read_null(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return count;
}

static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
{
        return 0;
}

static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
{
        size_t count = iov_iter_count(from);
        iov_iter_advance(from, count);
        return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
                        struct splice_desc *sd)
{
        return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
                                 loff_t *ppos, size_t len, unsigned int flags)
{
        return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

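/*
 * /dev/zero reads fill the user buffer with zeroes one PAGE_SIZE chunk
 * at a time so that a very large read stays preemptible and can be
 * interrupted by a signal between chunks.
 */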
static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
        size_t written = 0;

        while (iov_iter_count(iter)) {
                size_t chunk = iov_iter_count(iter), n;

                if (chunk > PAGE_SIZE)
                        chunk = PAGE_SIZE;      /* Just for latency reasons */
                n = iov_iter_zero(chunk, iter);
                if (!n && iov_iter_count(iter))
                        return written ? written : -EFAULT;
                written += n;
                if (signal_pending(current))
                        return written ? written : -ERESTARTSYS;
                cond_resched();
        }
        return written;
}

static ssize_t read_zero(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        size_t cleared = 0;

        while (count) {
                size_t chunk = min_t(size_t, count, PAGE_SIZE);
                size_t left;

                left = clear_user(buf + cleared, chunk);
                if (unlikely(left)) {
                        cleared += (chunk - left);
                        if (!cleared)
                                return -EFAULT;
                        break;
                }
                cleared += chunk;
                count -= chunk;

                if (signal_pending(current))
                        break;
                cond_resched();
        }

        return cleared;
}

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
        return -ENOSYS;
#endif
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        vma_set_anonymous(vma);
        return 0;
}

static unsigned long get_unmapped_area_zero(struct file *file,
                                unsigned long addr, unsigned long len,
                                unsigned long pgoff, unsigned long flags)
{
#ifdef CONFIG_MMU
        if (flags & MAP_SHARED) {
                /*
                 * mmap_zero() will call shmem_zero_setup() to create a file,
                 * so use shmem's get_unmapped_area in case it can be huge;
                 * and pass NULL for file as in mmap.c's get_unmapped_area(),
                 * so as not to confuse shmem with our handle on "/dev/zero".
                 */
                return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
        }

        /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
        return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
#else
        return -ENOSYS;
#endif
}

static ssize_t write_full(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
        loff_t ret;

        inode_lock(file_inode(file));
        switch (orig) {
        case SEEK_CUR:
                offset += file->f_pos;
                fallthrough;
        case SEEK_SET:
                /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
                if ((unsigned long long)offset >= -MAX_ERRNO) {
                        ret = -EOVERFLOW;
                        break;
                }
                file->f_pos = offset;
                ret = file->f_pos;
                force_successful_syscall_return();
                break;
        default:
                ret = -EINVAL;
        }
        inode_unlock(file_inode(file));
        return ret;
}

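/*
 * Shared open() for the raw-access nodes: require CAP_SYS_RAWIO and
 * honour kernel lockdown; /dev/mem files additionally get the unified
 * iomem address space so drivers can later revoke mappings over
 * ranges they claim.
 */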
static int open_port(struct inode *inode, struct file *filp)
{
        int rc;

        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;

        rc = security_locked_down(LOCKDOWN_DEV_MEM);
        if (rc)
                return rc;

        if (iminor(inode) != DEVMEM_MINOR)
                return 0;

        /*
         * Use a unified address space to have a single point to manage
         * revocations when drivers want to take over a /dev/mem mapped
         * range.
         */
        filp->f_mapping = iomem_get_mapping();

        return 0;
}

#define zero_lseek      null_lseek
#define full_lseek      null_lseek
#define write_zero      write_null
#define write_iter_zero write_iter_null
#define open_mem        open_port
#define open_kmem       open_mem

static const struct file_operations __maybe_unused mem_fops = {
        .llseek         = memory_lseek,
        .read           = read_mem,
        .write          = write_mem,
        .mmap           = mmap_mem,
        .open           = open_mem,
#ifndef CONFIG_MMU
        .get_unmapped_area = get_unmapped_area_mem,
        .mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations __maybe_unused kmem_fops = {
        .llseek         = memory_lseek,
        .read           = read_kmem,
        .write          = write_kmem,
        .mmap           = mmap_kmem,
        .open           = open_kmem,
#ifndef CONFIG_MMU
        .get_unmapped_area = get_unmapped_area_mem,
        .mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations null_fops = {
        .llseek         = null_lseek,
        .read           = read_null,
        .write          = write_null,
        .read_iter      = read_iter_null,
        .write_iter     = write_iter_null,
        .splice_write   = splice_write_null,
};

static const struct file_operations __maybe_unused port_fops = {
        .llseek         = memory_lseek,
        .read           = read_port,
        .write          = write_port,
        .open           = open_port,
};

static const struct file_operations zero_fops = {
        .llseek         = zero_lseek,
        .write          = write_zero,
        .read_iter      = read_iter_zero,
        .read           = read_zero,
        .write_iter     = write_iter_zero,
        .mmap           = mmap_zero,
        .get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
        .mmap_capabilities = zero_mmap_capabilities,
#endif
};

static const struct file_operations full_fops = {
        .llseek         = full_lseek,
        .read_iter      = read_iter_zero,
        .write          = write_full,
};

static const struct memdev {
        const char *name;
        umode_t mode;
        const struct file_operations *fops;
        fmode_t fmode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
         [DEVMEM_MINOR] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#endif
#ifdef CONFIG_DEVKMEM
         [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
#endif
         [3] = { "null", 0666, &null_fops, 0 },
#ifdef CONFIG_DEVPORT
         [4] = { "port", 0, &port_fops, 0 },
#endif
         [5] = { "zero", 0666, &zero_fops, 0 },
         [7] = { "full", 0666, &full_fops, 0 },
         [8] = { "random", 0666, &random_fops, 0 },
         [9] = { "urandom", 0666, &urandom_fops, 0 },
#ifdef CONFIG_PRINTK
        [11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
};
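
/*
 * The index into devlist[] is the device's minor number under
 * MEM_MAJOR (char major 1), so /dev/null is 1:3 and /dev/zero is 1:5;
 * a hole such as [6] or [10] simply means that minor is unused here.
 */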

static int memory_open(struct inode *inode, struct file *filp)
{
        int minor;
        const struct memdev *dev;

        minor = iminor(inode);
        if (minor >= ARRAY_SIZE(devlist))
                return -ENXIO;

        dev = &devlist[minor];
        if (!dev->fops)
                return -ENXIO;

        filp->f_op = dev->fops;
        filp->f_mode |= dev->fmode;

        if (dev->fops->open)
                return dev->fops->open(inode, filp);

        return 0;
}

static const struct file_operations memory_fops = {
        .open = memory_open,
        .llseek = noop_llseek,
};

static char *mem_devnode(struct device *dev, umode_t *mode)
{
        if (mode && devlist[MINOR(dev->devt)].mode)
                *mode = devlist[MINOR(dev->devt)].mode;
        return NULL;
}

static struct class *mem_class;

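/*
 * Register char major 1 (MEM_MAJOR), create a device node for every
 * populated devlist[] slot (skipping /dev/port where the architecture
 * has no port I/O), then hand off to the tty layer.
 */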
static int __init chr_dev_init(void)
{
        int minor;

        if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);

        mem_class = class_create(THIS_MODULE, "mem");
        if (IS_ERR(mem_class))
                return PTR_ERR(mem_class);

        mem_class->devnode = mem_devnode;
        for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
                if (!devlist[minor].name)
                        continue;

                /*
                 * Create /dev/port?
                 */
                if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
                        continue;

                device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
                              NULL, devlist[minor].name);
        }

        return tty_init();
}

fs_initcall(chr_dev_init);