// SPDX-License-Identifier: GPL-2.0
/*
 *      fs/proc/kcore.c kernel ELF core dumper
 *
 *      Modelled on fs/exec.c:aout_core_dump()
 *      Jeremy Fitzhardinge <jeremy@sw.oz.au>
 *      ELF version written by David Howells <David.Howells@nexor.co.uk>
 *      Modified and incorporated into 2.3.x by Tigran Aivazian <tigran@veritas.com>
 *      Support to dump vmalloc'd areas (ELF only), Tigran Aivazian <tigran@veritas.com>
 *      Safe accesses to vmalloc/direct-mapped discontiguous areas, Kanoj Sarcar <kanoj@sgi.com>
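 *
 *      The resulting ELF image can be consumed by ordinary userspace
 *      tools, e.g. "gdb vmlinux /proc/kcore" (illustrative usage only,
 *      nothing in this file depends on it).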
 */

#include <linux/crash_core.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/capability.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/notifier.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <linux/list.h>
#include <linux/ioport.h>
#include <linux/memory.h>
#include <linux/sched/task.h>
#include <linux/security.h>
#include <asm/sections.h>
#include "internal.h"

#define CORE_STR "CORE"

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS 0
#endif

static struct proc_dir_entry *proc_root_kcore;


#ifndef kc_vaddr_to_offset
#define kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET)
#endif
#ifndef kc_offset_to_vaddr
#define kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
#endif

static LIST_HEAD(kclist_head);
static DECLARE_RWSEM(kclist_lock);
static int kcore_need_update = 1;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
 * Same as oldmem_pfn_is_ram in vmcore
 */
static int (*mem_pfn_is_ram)(unsigned long pfn);

int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
        if (mem_pfn_is_ram)
                return -EBUSY;
        mem_pfn_is_ram = fn;
        return 0;
}

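/*
 * Ask the registered helper, if any, whether this pfn is backed by RAM;
 * default to assuming it is when no helper has been registered.
 */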
static int pfn_is_ram(unsigned long pfn)
{
        if (mem_pfn_is_ram)
                return mem_pfn_is_ram(pfn);
        else
                return 1;
}

/* This doesn't grab kclist_lock, so it should only be used at init time. */
void __init kclist_add(struct kcore_list *new, void *addr, size_t size,
                       int type)
{
        new->addr = (unsigned long)addr;
        new->size = size;
        new->type = type;

        list_add_tail(&new->list, &kclist_head);
}

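/*
 * Compute the total size of the /proc/kcore image: the ELF header, one
 * PT_NOTE program header plus one PT_LOAD header per kclist entry, the
 * note segment, and a page-aligned data area large enough to cover the
 * highest kclist offset.
 */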
static size_t get_kcore_size(int *nphdr, size_t *phdrs_len, size_t *notes_len,
                             size_t *data_offset)
{
        size_t try, size;
        struct kcore_list *m;

        *nphdr = 1; /* PT_NOTE */
        size = 0;

        list_for_each_entry(m, &kclist_head, list) {
                try = kc_vaddr_to_offset((size_t)m->addr + m->size);
                if (try > size)
                        size = try;
                *nphdr = *nphdr + 1;
        }

        *phdrs_len = *nphdr * sizeof(struct elf_phdr);
        *notes_len = (4 * sizeof(struct elf_note) +
                      3 * ALIGN(sizeof(CORE_STR), 4) +
                      VMCOREINFO_NOTE_NAME_BYTES +
                      ALIGN(sizeof(struct elf_prstatus), 4) +
                      ALIGN(sizeof(struct elf_prpsinfo), 4) +
                      ALIGN(arch_task_struct_size, 4) +
                      ALIGN(vmcoreinfo_size, 4));
        *data_offset = PAGE_ALIGN(sizeof(struct elfhdr) + *phdrs_len +
                                  *notes_len);
        return *data_offset + size;
}

#ifdef CONFIG_HIGHMEM
/*
 * With highmem, we can assume that [0...max_low_pfn) is one contiguous
 * range of memory, because memory holes there are not as big as in the
 * !HIGHMEM case. (HIGHMEM is special because part of memory is
 * _invisible_ to the kernel.)
 */
static int kcore_ram_list(struct list_head *head)
{
        struct kcore_list *ent;

        ent = kmalloc(sizeof(*ent), GFP_KERNEL);
        if (!ent)
                return -ENOMEM;
        ent->addr = (unsigned long)__va(0);
        ent->size = max_low_pfn << PAGE_SHIFT;
        ent->type = KCORE_RAM;
        list_add(&ent->list, head);
        return 0;
}

#else /* !CONFIG_HIGHMEM */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Calculate the vmemmap address range for the given System RAM pfns
 * and register it.
 */
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
        unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT;
        unsigned long nr_pages = ent->size >> PAGE_SHIFT;
        unsigned long start, end;
        struct kcore_list *vmm, *tmp;

        start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK;
        end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1;
        end = PAGE_ALIGN(end);
        /* overlap check (because we have to page-align the range) */
        list_for_each_entry(tmp, head, list) {
                if (tmp->type != KCORE_VMEMMAP)
                        continue;
                if (start < tmp->addr + tmp->size)
                        if (end > tmp->addr)
                                end = tmp->addr;
        }
        if (start < end) {
                vmm = kmalloc(sizeof(*vmm), GFP_KERNEL);
                if (!vmm)
                        return 0;
                vmm->addr = start;
                vmm->size = end - start;
                vmm->type = KCORE_VMEMMAP;
                list_add_tail(&vmm->list, head);
        }
        return 1;
}
#else
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
        return 1;
}

#endif

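/*
 * walk_system_ram_range() callback: register a KCORE_RAM entry for each
 * System RAM range covered by the kernel direct map, plus the vmemmap
 * range describing its struct pages.
 */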
static int
kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
{
        struct list_head *head = (struct list_head *)arg;
        struct kcore_list *ent;
        struct page *p;

        if (!pfn_valid(pfn))
                return 1;

        p = pfn_to_page(pfn);

        ent = kmalloc(sizeof(*ent), GFP_KERNEL);
        if (!ent)
                return -ENOMEM;
        ent->addr = (unsigned long)page_to_virt(p);
        ent->size = nr_pages << PAGE_SHIFT;

        if (!virt_addr_valid(ent->addr))
                goto free_out;

        /* Trim any area that is not mapped; adapted from ppc32 code. */
        if (ULONG_MAX - ent->addr < ent->size)
                ent->size = ULONG_MAX - ent->addr;

        /*
         * We've already checked virt_addr_valid(), so we know this
         * address is a valid pointer; check it against VMALLOC_START
         * to determine whether we need to trim.
         */
        if (VMALLOC_START > ent->addr) {
                if (VMALLOC_START - ent->addr < ent->size)
                        ent->size = VMALLOC_START - ent->addr;
        }

        ent->type = KCORE_RAM;
        list_add_tail(&ent->list, head);

        if (!get_sparsemem_vmemmap_info(ent, head)) {
                list_del(&ent->list);
                goto free_out;
        }

        return 0;
free_out:
        kfree(ent);
        return 1;
}

static int kcore_ram_list(struct list_head *list)
{
        int nid, ret;
        unsigned long end_pfn;

        /* Not initialized yet; find out the "max pfn" and update now. */
        end_pfn = 0;
        for_each_node_state(nid, N_MEMORY) {
                unsigned long node_end;
                node_end = node_end_pfn(nid);
                if (end_pfn < node_end)
                        end_pfn = node_end;
        }
        /* scan 0 to max_pfn */
        ret = walk_system_ram_range(0, end_pfn, list, kclist_add_private);
        if (ret)
                return -ENOMEM;
        return 0;
}
#endif /* CONFIG_HIGHMEM */

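/*
 * Rebuild the RAM portion of the kclist: swap out all KCORE_RAM and
 * KCORE_VMEMMAP entries for a freshly built list and recompute the
 * file size. Stale entries are freed after dropping kclist_lock to
 * keep the write-side critical section short.
 */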
static int kcore_update_ram(void)
{
        LIST_HEAD(list);
        LIST_HEAD(garbage);
        int nphdr;
        size_t phdrs_len, notes_len, data_offset;
        struct kcore_list *tmp, *pos;
        int ret = 0;

        down_write(&kclist_lock);
        if (!xchg(&kcore_need_update, 0))
                goto out;

        ret = kcore_ram_list(&list);
        if (ret) {
                /* Couldn't get the RAM list, try again next time. */
                WRITE_ONCE(kcore_need_update, 1);
                list_splice_tail(&list, &garbage);
                goto out;
        }

        list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
                if (pos->type == KCORE_RAM || pos->type == KCORE_VMEMMAP)
                        list_move(&pos->list, &garbage);
        }
        list_splice_tail(&list, &kclist_head);

        proc_root_kcore->size = get_kcore_size(&nphdr, &phdrs_len, &notes_len,
                                               &data_offset);

out:
        up_write(&kclist_lock);
        list_for_each_entry_safe(pos, tmp, &garbage, list) {
                list_del(&pos->list);
                kfree(pos);
        }
        return ret;
}

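/*
 * Append one ELF note (header, name, descriptor) at offset *i in the
 * notes buffer, keeping the 4-byte alignment the ELF format requires.
 */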
static void append_kcore_note(char *notes, size_t *i, const char *name,
                              unsigned int type, const void *desc,
                              size_t descsz)
{
        struct elf_note *note = (struct elf_note *)&notes[*i];

        note->n_namesz = strlen(name) + 1;
        note->n_descsz = descsz;
        note->n_type = type;
        *i += sizeof(*note);
        memcpy(&notes[*i], name, note->n_namesz);
        *i = ALIGN(*i + note->n_namesz, 4);
        memcpy(&notes[*i], desc, descsz);
        *i = ALIGN(*i + descsz, 4);
}

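/*
 * The file is laid out as: ELF header, program headers, note segment,
 * then the memory contents starting at data_offset. For the memory
 * area, file offsets are translated back to kernel virtual addresses
 * via kc_offset_to_vaddr() and served according to the type of the
 * matching kclist entry.
 */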
static ssize_t
read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
{
        char *buf = file->private_data;
        size_t phdrs_offset, notes_offset, data_offset;
        size_t phdrs_len, notes_len;
        struct kcore_list *m;
        size_t tsz;
        int nphdr;
        unsigned long start;
        size_t orig_buflen = buflen;
        int ret = 0;

        down_read(&kclist_lock);

        get_kcore_size(&nphdr, &phdrs_len, &notes_len, &data_offset);
        phdrs_offset = sizeof(struct elfhdr);
        notes_offset = phdrs_offset + phdrs_len;

        /* ELF file header. */
        if (buflen && *fpos < sizeof(struct elfhdr)) {
                struct elfhdr ehdr = {
                        .e_ident = {
                                [EI_MAG0] = ELFMAG0,
                                [EI_MAG1] = ELFMAG1,
                                [EI_MAG2] = ELFMAG2,
                                [EI_MAG3] = ELFMAG3,
                                [EI_CLASS] = ELF_CLASS,
                                [EI_DATA] = ELF_DATA,
                                [EI_VERSION] = EV_CURRENT,
                                [EI_OSABI] = ELF_OSABI,
                        },
                        .e_type = ET_CORE,
                        .e_machine = ELF_ARCH,
                        .e_version = EV_CURRENT,
                        .e_phoff = sizeof(struct elfhdr),
                        .e_flags = ELF_CORE_EFLAGS,
                        .e_ehsize = sizeof(struct elfhdr),
                        .e_phentsize = sizeof(struct elf_phdr),
                        .e_phnum = nphdr,
                };

                tsz = min_t(size_t, buflen, sizeof(struct elfhdr) - *fpos);
                if (copy_to_user(buffer, (char *)&ehdr + *fpos, tsz)) {
                        ret = -EFAULT;
                        goto out;
                }

                buffer += tsz;
                buflen -= tsz;
                *fpos += tsz;
        }

        /* ELF program headers. */
        if (buflen && *fpos < phdrs_offset + phdrs_len) {
                struct elf_phdr *phdrs, *phdr;

                phdrs = kzalloc(phdrs_len, GFP_KERNEL);
                if (!phdrs) {
                        ret = -ENOMEM;
                        goto out;
                }

                phdrs[0].p_type = PT_NOTE;
                phdrs[0].p_offset = notes_offset;
                phdrs[0].p_filesz = notes_len;

                phdr = &phdrs[1];
                list_for_each_entry(m, &kclist_head, list) {
                        phdr->p_type = PT_LOAD;
                        phdr->p_flags = PF_R | PF_W | PF_X;
                        phdr->p_offset = kc_vaddr_to_offset(m->addr) + data_offset;
                        phdr->p_vaddr = (size_t)m->addr;
                        if (m->type == KCORE_RAM)
                                phdr->p_paddr = __pa(m->addr);
                        else if (m->type == KCORE_TEXT)
                                phdr->p_paddr = __pa_symbol(m->addr);
                        else
                                phdr->p_paddr = (elf_addr_t)-1;
                        phdr->p_filesz = phdr->p_memsz = m->size;
                        phdr->p_align = PAGE_SIZE;
                        phdr++;
                }

                tsz = min_t(size_t, buflen, phdrs_offset + phdrs_len - *fpos);
                if (copy_to_user(buffer, (char *)phdrs + *fpos - phdrs_offset,
                                 tsz)) {
                        kfree(phdrs);
                        ret = -EFAULT;
                        goto out;
                }
                kfree(phdrs);

                buffer += tsz;
                buflen -= tsz;
                *fpos += tsz;
        }

        /* ELF note segment. */
        if (buflen && *fpos < notes_offset + notes_len) {
                struct elf_prstatus prstatus = {};
                struct elf_prpsinfo prpsinfo = {
                        .pr_sname = 'R',
                        .pr_fname = "vmlinux",
                };
                char *notes;
                size_t i = 0;

                strlcpy(prpsinfo.pr_psargs, saved_command_line,
                        sizeof(prpsinfo.pr_psargs));

                notes = kzalloc(notes_len, GFP_KERNEL);
                if (!notes) {
                        ret = -ENOMEM;
                        goto out;
                }

                append_kcore_note(notes, &i, CORE_STR, NT_PRSTATUS, &prstatus,
                                  sizeof(prstatus));
                append_kcore_note(notes, &i, CORE_STR, NT_PRPSINFO, &prpsinfo,
                                  sizeof(prpsinfo));
                append_kcore_note(notes, &i, CORE_STR, NT_TASKSTRUCT, current,
                                  arch_task_struct_size);
                /*
                 * vmcoreinfo_size is mostly constant after init time, but it
                 * can be changed by crash_save_vmcoreinfo(). Racing here with a
                 * panic on another CPU before the machine goes down is insanely
                 * unlikely, but it's better to not leave potential buffer
                 * overflows lying around, regardless.
                 */
                append_kcore_note(notes, &i, VMCOREINFO_NOTE_NAME, 0,
                                  vmcoreinfo_data,
                                  min(vmcoreinfo_size, notes_len - i));

                tsz = min_t(size_t, buflen, notes_offset + notes_len - *fpos);
                if (copy_to_user(buffer, notes + *fpos - notes_offset, tsz)) {
                        kfree(notes);
                        ret = -EFAULT;
                        goto out;
                }
                kfree(notes);

                buffer += tsz;
                buflen -= tsz;
                *fpos += tsz;
        }

        /*
         * Check to see whether our file offset falls within any of the
         * address ranges recorded in the elf_phdrs on our list.
         */
        start = kc_offset_to_vaddr(*fpos - data_offset);
        if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
                tsz = buflen;

        m = NULL;
        while (buflen) {
                struct page *page;
                unsigned long pfn;

                /*
                 * If this is the first iteration or the address is not within
                 * the previous entry, search for a matching entry.
                 */
                if (!m || start < m->addr || start >= m->addr + m->size) {
                        list_for_each_entry(m, &kclist_head, list) {
                                if (start >= m->addr &&
                                    start < m->addr + m->size)
                                        break;
                        }
                }

                if (&m->list == &kclist_head) {
                        if (clear_user(buffer, tsz)) {
                                ret = -EFAULT;
                                goto out;
                        }
                        m = NULL;       /* skip the list anchor */
                        goto skip;
                }

                switch (m->type) {
                case KCORE_VMALLOC:
                        vread(buf, (char *)start, tsz);
                        /* Zero-fill the user buffer even if nothing was read. */
                        if (copy_to_user(buffer, buf, tsz)) {
                                ret = -EFAULT;
                                goto out;
                        }
                        break;
                case KCORE_USER:
                        /* User page is handled prior to normal kernel page: */
                        if (copy_to_user(buffer, (char *)start, tsz)) {
                                ret = -EFAULT;
                                goto out;
                        }
                        break;
                case KCORE_RAM:
                        pfn = __pa(start) >> PAGE_SHIFT;
                        page = pfn_to_online_page(pfn);

                        /*
                         * Don't read offline sections, logically offline pages
                         * (e.g., inflated in a balloon), hwpoisoned pages,
                         * and explicitly excluded physical ranges.
                         */
                        if (!page || PageOffline(page) ||
                            is_page_hwpoison(page) || !pfn_is_ram(pfn)) {
                                if (clear_user(buffer, tsz)) {
                                        ret = -EFAULT;
                                        goto out;
                                }
                                break;
                        }
                        fallthrough;
                case KCORE_VMEMMAP:
                case KCORE_TEXT:
                        if (kern_addr_valid(start)) {
                                /*
                                 * Using bounce buffer to bypass the
                                 * hardened user copy kernel text checks.
                                 */
                                if (copy_from_kernel_nofault(buf, (void *)start,
                                                tsz)) {
                                        if (clear_user(buffer, tsz)) {
                                                ret = -EFAULT;
                                                goto out;
                                        }
                                } else {
                                        if (copy_to_user(buffer, buf, tsz)) {
                                                ret = -EFAULT;
                                                goto out;
                                        }
                                }
                        } else {
                                if (clear_user(buffer, tsz)) {
                                        ret = -EFAULT;
                                        goto out;
                                }
                        }
                        break;
                default:
                        pr_warn_once("Unhandled KCORE type: %d\n", m->type);
                        if (clear_user(buffer, tsz)) {
                                ret = -EFAULT;
                                goto out;
                        }
                }
skip:
                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                start += tsz;
                tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
        }

out:
        up_read(&kclist_lock);
        if (ret)
                return ret;
        return orig_buflen - buflen;
}

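/*
 * Opening /proc/kcore requires CAP_SYS_RAWIO and may be refused
 * entirely under kernel lockdown. The page-sized allocation here is
 * the per-open bounce buffer used by read_kcore().
 */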
static int open_kcore(struct inode *inode, struct file *filp)
{
        int ret = security_locked_down(LOCKDOWN_KCORE);

        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;

        if (ret)
                return ret;

        filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!filp->private_data)
                return -ENOMEM;

        if (kcore_need_update)
                kcore_update_ram();
        if (i_size_read(inode) != proc_root_kcore->size) {
                inode_lock(inode);
                i_size_write(inode, proc_root_kcore->size);
                inode_unlock(inode);
        }
        return 0;
}

static int release_kcore(struct inode *inode, struct file *file)
{
        kfree(file->private_data);
        return 0;
}

static const struct proc_ops kcore_proc_ops = {
        .proc_read      = read_kcore,
        .proc_open      = open_kcore,
        .proc_release   = release_kcore,
        .proc_lseek     = default_llseek,
};

/*
 * Just remember that we have to update kcore; the expensive rescan is
 * deferred until the next open of /proc/kcore.
 */
static int __meminit kcore_callback(struct notifier_block *self,
                                    unsigned long action, void *arg)
{
        switch (action) {
        case MEM_ONLINE:
        case MEM_OFFLINE:
                kcore_need_update = 1;
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block kcore_callback_nb __meminitdata = {
        .notifier_call = kcore_callback,
        .priority = 0,
};

static struct kcore_list kcore_vmalloc;

#ifdef CONFIG_ARCH_PROC_KCORE_TEXT
static struct kcore_list kcore_text;
/*
 * If defined, a special segment is used for mapping the kernel text
 * instead of the direct-map area; we need to create a special TEXT
 * section for it.
 */
static void __init proc_kcore_text_init(void)
{
        kclist_add(&kcore_text, _text, _end - _text, KCORE_TEXT);
}
#else
static void __init proc_kcore_text_init(void)
{
}
#endif

#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
/*
 * If the module area is separate from the vmalloc area (i.e.,
 * [MODULES_VADDR, MODULES_END) has no intersection with it), register
 * it as its own KCORE_VMALLOC entry.
 */
static struct kcore_list kcore_modules;
static void __init add_modules_range(void)
{
        if (MODULES_VADDR != VMALLOC_START && MODULES_END != VMALLOC_END) {
                kclist_add(&kcore_modules, (void *)MODULES_VADDR,
                        MODULES_END - MODULES_VADDR, KCORE_VMALLOC);
        }
}
#else
static void __init add_modules_range(void)
{
}
#endif

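/*
 * Register /proc/kcore and seed the kclist with the text, vmalloc,
 * module, and direct-map areas. RAM entries are kept current via the
 * memory hotplug notifier, which flags the list for a rebuild.
 */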
static int __init proc_kcore_init(void)
{
        proc_root_kcore = proc_create("kcore", S_IRUSR, NULL, &kcore_proc_ops);
        if (!proc_root_kcore) {
                pr_err("couldn't create /proc/kcore\n");
                return 0; /* Always returns 0. */
        }
        /* Store text area if it's special */
        proc_kcore_text_init();
        /* Store vmalloc area */
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);
        add_modules_range();
        /* Store direct-map area from physical memory map */
        kcore_update_ram();
        register_hotmemory_notifier(&kcore_callback_nb);

        return 0;
}
fs_initcall(proc_kcore_init);