/*
 * fs/proc/vmcore.c Interface for accessing the crash
 * dump from the system's previous life.
 * Heavily borrowed from fs/proc/kcore.c
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 * Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */
#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include "internal.h"
/* List representing chunks of contiguous memory areas and their offsets in
 * the vmcore file.
 */
static LIST_HEAD(vmcore_list);
/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;
/* Size of all notes minus the device dump notes */
static size_t elfnotes_orig_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore;

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/* Device Dump list and mutex to synchronize access to list */
static LIST_HEAD(vmcoredd_list);
static DEFINE_MUTEX(vmcoredd_mutex);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Device Dump Size */
static size_t vmcoredd_orig_sz;
/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error.
 * The called function has to take care of module refcounting.
 */
static int (*oldmem_pfn_is_ram)(unsigned long pfn);

int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
        if (oldmem_pfn_is_ram)
                return -EBUSY;
        oldmem_pfn_is_ram = fn;
        return 0;
}
EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);

void unregister_oldmem_pfn_is_ram(void)
{
        oldmem_pfn_is_ram = NULL;
        wmb();
}
EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);
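
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a hypervisor backend would typically wire up the hook above roughly like
 * this. The backend name and the way it decides whether a pfn is backed by
 * real memory are hypothetical; only the register/unregister calls and the
 * > 0 / 0 / < 0 return convention come from the interface above.
 */
#if 0	/* example only */
static int example_oldmem_pfn_is_ram(unsigned long pfn)
{
        /* 1: pfn is backed by RAM, 0: e.g. a ballooned page, < 0: error */
        return example_backend_page_is_populated(pfn) ? 1 : 0;
}

static int __init example_backend_register(void)
{
        /* Fails with -EBUSY if another backend is already registered. */
        return register_oldmem_pfn_is_ram(example_oldmem_pfn_is_ram);
}

static void example_backend_unregister(void)
{
        unregister_oldmem_pfn_is_ram();
}
#endif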
static int pfn_is_ram(unsigned long pfn)
{
        int (*fn)(unsigned long pfn);
        /* pfn is ram unless fn() checks pagetype */
        int ret = 1;

        /*
         * Ask hypervisor if the pfn is really ram.
         * A ballooned page contains no data and reading from such a page
         * will cause high load in the hypervisor.
         */
        fn = oldmem_pfn_is_ram;
        if (fn)
                ret = fn(pfn);

        return ret;
}
/* Reads a page from the oldmem device from given offset. */
static ssize_t read_from_oldmem(char *buf, size_t count,
                                u64 *ppos, int userbuf)
{
        unsigned long pfn, offset;
        size_t nr_bytes;
        ssize_t read = 0, tmp;

        if (!count)
                return 0;

        offset = (unsigned long)(*ppos % PAGE_SIZE);
        pfn = (unsigned long)(*ppos / PAGE_SIZE);

        do {
                if (count > (PAGE_SIZE - offset))
                        nr_bytes = PAGE_SIZE - offset;
                else
                        nr_bytes = count;

                /* If pfn is not ram, return zeros for sparse dump files */
                if (pfn_is_ram(pfn) == 0)
                        memset(buf, 0, nr_bytes);
                else {
                        tmp = copy_oldmem_page(pfn, buf, nr_bytes,
                                               offset, userbuf);
                        if (tmp < 0)
                                return tmp;
                }
                *ppos += nr_bytes;
                count -= nr_bytes;
                buf += nr_bytes;
                read += nr_bytes;
                ++pfn;
                offset = 0;
        } while (count);

        return read;
}
/*
 * Architectures may override this function to allocate ELF header in 2nd kernel
 */
int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
        return 0;
}

/*
 * Architectures may override this function to free header
 */
void __weak elfcorehdr_free(unsigned long long addr)
{}

/*
 * Architectures may override this function to read from ELF header
 */
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
        return read_from_oldmem(buf, count, ppos, 0);
}

/*
 * Architectures may override this function to read from notes sections
 */
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
        return read_from_oldmem(buf, count, ppos, 0);
}

/*
 * Architectures may override this function to map oldmem
 */
int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
                                  unsigned long from, unsigned long pfn,
                                  unsigned long size, pgprot_t prot)
{
        return remap_pfn_range(vma, from, pfn, size, prot);
}
/*
 * Copy to either kernel or user space
 */
static int copy_to(void *target, void *src, size_t size, int userbuf)
{
        if (userbuf) {
                if (copy_to_user((char __user *)target, src, size))
                        return -EFAULT;
        } else {
                memcpy(target, src, size);
        }
        return 0;
}
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
{
        struct vmcoredd_node *dump;
        u64 offset = 0;
        int ret = 0;
        size_t tsz;
        char *buf;

        mutex_lock(&vmcoredd_mutex);
        list_for_each_entry(dump, &vmcoredd_list, list) {
                if (start < offset + dump->size) {
                        tsz = min(offset + (u64)dump->size - start, (u64)size);
                        buf = dump->buf + start - offset;
                        if (copy_to(dst, buf, tsz, userbuf)) {
                                ret = -EFAULT;
                                goto out_unlock;
                        }

                        size -= tsz;
                        start += tsz;
                        dst += tsz;

                        /* Leave now if buffer filled already */
                        if (!size)
                                goto out_unlock;
                }
                offset += dump->size;
        }

out_unlock:
        mutex_unlock(&vmcoredd_mutex);
        return ret;
}

static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
                               u64 start, size_t size)
{
        struct vmcoredd_node *dump;
        u64 offset = 0;
        int ret = 0;
        size_t tsz;
        char *buf;

        mutex_lock(&vmcoredd_mutex);
        list_for_each_entry(dump, &vmcoredd_list, list) {
                if (start < offset + dump->size) {
                        tsz = min(offset + (u64)dump->size - start, (u64)size);
                        buf = dump->buf + start - offset;
                        if (remap_vmalloc_range_partial(vma, dst, buf, tsz)) {
                                ret = -EFAULT;
                                goto out_unlock;
                        }

                        size -= tsz;
                        start += tsz;
                        dst += tsz;

                        /* Leave now if buffer filled already */
                        if (!size)
                                goto out_unlock;
                }
                offset += dump->size;
        }

out_unlock:
        mutex_unlock(&vmcoredd_mutex);
        return ret;
}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
/* Read from the ELF header and then the crash dump. On error, a negative
 * value is returned; otherwise the number of bytes read is returned.
 */
static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
                             int userbuf)
{
        ssize_t acc = 0, tmp;
        size_t tsz;
        u64 start;
        struct vmcore *m = NULL;

        if (buflen == 0 || *fpos >= vmcore_size)
                return 0;

        /* trim buflen to not go beyond EOF */
        if (buflen > vmcore_size - *fpos)
                buflen = vmcore_size - *fpos;

        /* Read ELF core header */
        if (*fpos < elfcorebuf_sz) {
                tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
                if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
                        return -EFAULT;
                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                acc += tsz;

                /* leave now if filled buffer already */
                if (buflen == 0)
                        return acc;
        }

        /* Read Elf note segment */
        if (*fpos < elfcorebuf_sz + elfnotes_sz) {
                void *kaddr;

                /* We add device dumps before other elf notes because the
                 * other elf notes may not fill the elf notes buffer
                 * completely and we will end up with zero-filled data
                 * between the elf notes and the device dumps. Tools will
                 * then try to decode this zero-filled data as valid notes
                 * and we don't want that. Hence, adding device dumps before
                 * the other elf notes ensures that zero-filled data can be
                 * avoided.
                 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
                /* Read device dumps */
                if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {
                        tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
                                  (size_t)*fpos, buflen);
                        start = *fpos - elfcorebuf_sz;
                        if (vmcoredd_copy_dumps(buffer, start, tsz, userbuf))
                                return -EFAULT;

                        buflen -= tsz;
                        *fpos += tsz;
                        buffer += tsz;
                        acc += tsz;

                        /* leave now if filled buffer already */
                        if (!buflen)
                                return acc;
                }
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

                /* Read remaining elf notes */
                tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
                kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
                if (copy_to(buffer, kaddr, tsz, userbuf))
                        return -EFAULT;

                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                acc += tsz;

                /* leave now if filled buffer already */
                if (buflen == 0)
                        return acc;
        }

        list_for_each_entry(m, &vmcore_list, list) {
                if (*fpos < m->offset + m->size) {
                        tsz = (size_t)min_t(unsigned long long,
                                            m->offset + m->size - *fpos,
                                            buflen);
                        start = m->paddr + *fpos - m->offset;
                        tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
                        if (tmp < 0)
                                return tmp;
                        buflen -= tsz;
                        *fpos += tsz;
                        buffer += tsz;
                        acc += tsz;

                        /* leave now if filled buffer already */
                        if (buflen == 0)
                                return acc;
                }
        }

        return acc;
}

static ssize_t read_vmcore(struct file *file, char __user *buffer,
                           size_t buflen, loff_t *fpos)
{
        return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
}
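
/*
 * Usage sketch (editor's addition): because read_vmcore() presents the ELF
 * header, the merged note segment and the old kernel's memory as one flat
 * file, userspace can treat /proc/vmcore as an ordinary ELF core file, e.g.
 * (illustrative only):
 *
 *	int fd = open("/proc/vmcore", O_RDONLY);
 *	Elf64_Ehdr ehdr;
 *	read(fd, &ehdr, sizeof(ehdr));	// plain ELF header at offset 0
 *
 * Tools such as makedumpfile and crash rely on exactly this layout.
 */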
/*
 * The vmcore fault handler uses the page cache and fills data using the
 * standard __read_vmcore() function.
 *
 * On s390 the fault handler is used for memory regions that can't be mapped
 * directly with remap_pfn_range().
 */
static int mmap_vmcore_fault(struct vm_fault *vmf)
{
#ifdef CONFIG_S390
        struct address_space *mapping = vmf->vma->vm_file->f_mapping;
        pgoff_t index = vmf->pgoff;
        struct page *page;
        loff_t offset;
        char *buf;
        ssize_t rc;

        page = find_or_create_page(mapping, index, GFP_KERNEL);
        if (!page)
                return VM_FAULT_OOM;
        if (!PageUptodate(page)) {
                offset = (loff_t) index << PAGE_SHIFT;
                buf = __va((page_to_pfn(page) << PAGE_SHIFT));
                rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
                if (rc < 0) {
                        unlock_page(page);
                        put_page(page);
                        return (rc == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
                }
                SetPageUptodate(page);
        }
        unlock_page(page);
        vmf->page = page;
        return 0;
#else
        return VM_FAULT_SIGBUS;
#endif
}

static const struct vm_operations_struct vmcore_mmap_ops = {
        .fault = mmap_vmcore_fault,
};
/**
 * vmcore_alloc_buf - allocate buffer in vmalloc memory
 * @size: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 * disabled and there's no need to allow users to mmap the buffer.
 */
static inline char *vmcore_alloc_buf(size_t size)
{
#ifdef CONFIG_MMU
        return vmalloc_user(size);
#else
        return vzalloc(size);
#endif
}
/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 * essential for mmap_vmcore() in order to map physically
 * non-contiguous objects (ELF header, ELF note segment and memory
 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 * virtually contiguous user-space in ELF layout.
 */
#ifdef CONFIG_MMU
/*
 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range replacing all pages
 * reported as not being ram with the zero page.
 *
 * @vma: vm_area_struct describing requested mapping
 * @from: start remapping from
 * @pfn: page frame number to start remapping to
 * @size: remapping size
 * @prot: protection bits
 *
 * Returns zero on success, -EAGAIN on failure.
 */
static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
                                    unsigned long from, unsigned long pfn,
                                    unsigned long size, pgprot_t prot)
{
        unsigned long map_size;
        unsigned long pos_start, pos_end, pos;
        unsigned long zeropage_pfn = my_zero_pfn(0);
        size_t len = 0;

        pos_start = pfn;
        pos_end = pfn + (size >> PAGE_SHIFT);

        for (pos = pos_start; pos < pos_end; ++pos) {
                if (!pfn_is_ram(pos)) {
                        /*
                         * We hit a page which is not ram. Remap the continuous
                         * region between pos_start and pos-1 and replace
                         * the non-ram page at pos with the zero page.
                         */
                        if (pos > pos_start) {
                                /* Remap continuous region */
                                map_size = (pos - pos_start) << PAGE_SHIFT;
                                if (remap_oldmem_pfn_range(vma, from + len,
                                                           pos_start, map_size,
                                                           prot))
                                        goto fail;
                                len += map_size;
                        }
                        /* Remap the zero page */
                        if (remap_oldmem_pfn_range(vma, from + len,
                                                   zeropage_pfn,
                                                   PAGE_SIZE, prot))
                                goto fail;
                        len += PAGE_SIZE;
                        pos_start = pos + 1;
                }
        }
        if (pos > pos_start) {
                /* Remap the rest */
                map_size = (pos - pos_start) << PAGE_SHIFT;
                if (remap_oldmem_pfn_range(vma, from + len, pos_start,
                                           map_size, prot))
                        goto fail;
        }
        return 0;
fail:
        do_munmap(vma->vm_mm, from, len, NULL);
        return -EAGAIN;
}
static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
                                   unsigned long from, unsigned long pfn,
                                   unsigned long size, pgprot_t prot)
{
        /*
         * Check if oldmem_pfn_is_ram was registered to avoid
         * looping over all pages without a reason.
         */
        if (oldmem_pfn_is_ram)
                return remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
        else
                return remap_oldmem_pfn_range(vma, from, pfn, size, prot);
}
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;
        u64 start, end, len, tsz;
        struct vmcore *m;

        start = (u64)vma->vm_pgoff << PAGE_SHIFT;
        end = start + size;

        if (size > vmcore_size || end > vmcore_size)
                return -EINVAL;

        if (vma->vm_flags & (VM_WRITE | VM_EXEC))
                return -EPERM;

        vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
        vma->vm_flags |= VM_MIXEDMAP;
        vma->vm_ops = &vmcore_mmap_ops;

        len = 0;

        if (start < elfcorebuf_sz) {
                u64 pfn;

                tsz = min(elfcorebuf_sz - (size_t)start, size);
                pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
                if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
                                    vma->vm_page_prot))
                        return -EAGAIN;
                size -= tsz;
                start += tsz;
                len += tsz;

                if (size == 0)
                        return 0;
        }

        if (start < elfcorebuf_sz + elfnotes_sz) {
                void *kaddr;

                /* We add device dumps before other elf notes because the
                 * other elf notes may not fill the elf notes buffer
                 * completely and we will end up with zero-filled data
                 * between the elf notes and the device dumps. Tools will
                 * then try to decode this zero-filled data as valid notes
                 * and we don't want that. Hence, adding device dumps before
                 * the other elf notes ensures that zero-filled data can be
                 * avoided. This also ensures that the device dumps and
                 * other elf notes can be properly mmaped at page aligned
                 * addresses.
                 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
                /* Read device dumps */
                if (start < elfcorebuf_sz + vmcoredd_orig_sz) {
                        u64 start_off;

                        tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
                                  (size_t)start, size);
                        start_off = start - elfcorebuf_sz;
                        if (vmcoredd_mmap_dumps(vma, vma->vm_start + len,
                                                start_off, tsz))
                                goto fail;

                        size -= tsz;
                        start += tsz;
                        len += tsz;

                        /* leave now if filled buffer already */
                        if (!size)
                                return 0;
                }
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

                /* Read remaining elf notes */
                tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
                kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
                if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
                                                kaddr, tsz))
                        goto fail;

                size -= tsz;
                start += tsz;
                len += tsz;

                if (size == 0)
                        return 0;
        }

        list_for_each_entry(m, &vmcore_list, list) {
                if (start < m->offset + m->size) {
                        u64 paddr = 0;

                        tsz = (size_t)min_t(unsigned long long,
                                            m->offset + m->size - start, size);
                        paddr = m->paddr + start - m->offset;
                        if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
                                                    paddr >> PAGE_SHIFT, tsz,
                                                    vma->vm_page_prot))
                                goto fail;
                        size -= tsz;
                        start += tsz;
                        len += tsz;

                        if (size == 0)
                                return 0;
                }
        }

        return 0;
fail:
        do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
        return -EAGAIN;
}
#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
        return -ENOSYS;
}
#endif
static const struct file_operations proc_vmcore_operations = {
        .read           = read_vmcore,
        .llseek         = default_llseek,
        .mmap           = mmap_vmcore,
};
static struct vmcore* __init get_new_element(void)
{
        return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}
static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
                           struct list_head *vc_list)
{
        u64 size;
        struct vmcore *m;

        size = elfsz + elfnotesegsz;
        list_for_each_entry(m, vc_list, list) {
                size += m->size;
        }
        return size;
}
/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to real size of ELF
 * note segment.
 */
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
        int i, rc = 0;
        Elf64_Phdr *phdr_ptr;
        Elf64_Nhdr *nhdr_ptr;

        phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                void *notes_section;
                u64 offset, max_sz, sz, real_sz = 0;
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                max_sz = phdr_ptr->p_memsz;
                offset = phdr_ptr->p_offset;
                notes_section = kmalloc(max_sz, GFP_KERNEL);
                if (!notes_section)
                        return -ENOMEM;
                rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
                if (rc < 0) {
                        kfree(notes_section);
                        return rc;
                }
                nhdr_ptr = notes_section;
                while (nhdr_ptr->n_namesz != 0) {
                        sz = sizeof(Elf64_Nhdr) +
                                (((u64)nhdr_ptr->n_namesz + 3) & ~3) +
                                (((u64)nhdr_ptr->n_descsz + 3) & ~3);
                        if ((real_sz + sz) > max_sz) {
                                pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
                                        nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
                                break;
                        }
                        real_sz += sz;
                        nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
                }
                kfree(notes_section);
                phdr_ptr->p_memsz = real_sz;
                if (real_sz == 0)
                        pr_warn("Warning: Zero PT_NOTE entries found\n");
        }

        return 0;
}
/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
                                                 int *nr_ptnote, u64 *sz_ptnote)
{
        int i;
        Elf64_Phdr *phdr_ptr;

        *nr_ptnote = *sz_ptnote = 0;

        phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                *nr_ptnote += 1;
                *sz_ptnote += phdr_ptr->p_memsz;
        }

        return 0;
}
/**
 * copy_notes_elf64 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that
 * the size of the buffer @notes_buf is equal to or larger than sum of
 * the real ELF note segment headers and data.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
        int i, rc = 0;
        Elf64_Phdr *phdr_ptr;

        phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 offset;
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                offset = phdr_ptr->p_offset;
                rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
                                           &offset);
                if (rc < 0)
                        return rc;
                notes_buf += phdr_ptr->p_memsz;
        }

        return 0;
}
/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
                                           char **notes_buf, size_t *notes_sz)
{
        int i, nr_ptnote = 0, rc = 0;
        char *tmp;
        Elf64_Ehdr *ehdr_ptr;
        Elf64_Phdr phdr;
        u64 phdr_sz = 0, note_off;

        ehdr_ptr = (Elf64_Ehdr *)elfptr;

        rc = update_note_header_size_elf64(ehdr_ptr);
        if (rc < 0)
                return rc;

        rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
        if (rc < 0)
                return rc;

        *notes_sz = roundup(phdr_sz, PAGE_SIZE);
        *notes_buf = vmcore_alloc_buf(*notes_sz);
        if (!*notes_buf)
                return -ENOMEM;

        rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
        if (rc < 0)
                return rc;

        /* Prepare merged PT_NOTE program header. */
        phdr.p_type    = PT_NOTE;
        phdr.p_flags   = 0;
        note_off = sizeof(Elf64_Ehdr) +
                        (ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
        phdr.p_offset  = roundup(note_off, PAGE_SIZE);
        phdr.p_vaddr   = phdr.p_paddr = 0;
        phdr.p_filesz  = phdr.p_memsz = phdr_sz;
        phdr.p_align   = 0;

        /* Add merged PT_NOTE program header. */
        tmp = elfptr + sizeof(Elf64_Ehdr);
        memcpy(tmp, &phdr, sizeof(phdr));
        tmp += sizeof(phdr);

        /* Remove unwanted PT_NOTE program headers. */
        i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
        *elfsz = *elfsz - i;
        memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf64_Ehdr) - sizeof(Elf64_Phdr)));
        memset(elfptr + *elfsz, 0, i);
        *elfsz = roundup(*elfsz, PAGE_SIZE);

        /* Modify e_phnum to reflect merged headers. */
        ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

        /* Store the size of all notes. We need this to update the note
         * header when the device dumps will be added.
         */
        elfnotes_orig_sz = phdr.p_memsz;

        return 0;
}
/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to real size of ELF
 * note segment.
 */
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
        int i, rc = 0;
        Elf32_Phdr *phdr_ptr;
        Elf32_Nhdr *nhdr_ptr;

        phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                void *notes_section;
                u64 offset, max_sz, sz, real_sz = 0;
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                max_sz = phdr_ptr->p_memsz;
                offset = phdr_ptr->p_offset;
                notes_section = kmalloc(max_sz, GFP_KERNEL);
                if (!notes_section)
                        return -ENOMEM;
                rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
                if (rc < 0) {
                        kfree(notes_section);
                        return rc;
                }
                nhdr_ptr = notes_section;
                while (nhdr_ptr->n_namesz != 0) {
                        sz = sizeof(Elf32_Nhdr) +
                                (((u64)nhdr_ptr->n_namesz + 3) & ~3) +
                                (((u64)nhdr_ptr->n_descsz + 3) & ~3);
                        if ((real_sz + sz) > max_sz) {
                                pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
                                        nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
                                break;
                        }
                        real_sz += sz;
                        nhdr_ptr = (Elf32_Nhdr *)((char *)nhdr_ptr + sz);
                }
                kfree(notes_section);
                phdr_ptr->p_memsz = real_sz;
                if (real_sz == 0)
                        pr_warn("Warning: Zero PT_NOTE entries found\n");
        }

        return 0;
}
/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
                                                 int *nr_ptnote, u64 *sz_ptnote)
{
        int i;
        Elf32_Phdr *phdr_ptr;

        *nr_ptnote = *sz_ptnote = 0;

        phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                *nr_ptnote += 1;
                *sz_ptnote += phdr_ptr->p_memsz;
        }

        return 0;
}
/**
 * copy_notes_elf32 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that
 * the size of the buffer @notes_buf is equal to or larger than sum of
 * the real ELF note segment headers and data.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
        int i, rc = 0;
        Elf32_Phdr *phdr_ptr;

        phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 offset;
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                offset = phdr_ptr->p_offset;
                rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
                                           &offset);
                if (rc < 0)
                        return rc;
                notes_buf += phdr_ptr->p_memsz;
        }

        return 0;
}
/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
                                           char **notes_buf, size_t *notes_sz)
{
        int i, nr_ptnote = 0, rc = 0;
        char *tmp;
        Elf32_Ehdr *ehdr_ptr;
        Elf32_Phdr phdr;
        u64 phdr_sz = 0, note_off;

        ehdr_ptr = (Elf32_Ehdr *)elfptr;

        rc = update_note_header_size_elf32(ehdr_ptr);
        if (rc < 0)
                return rc;

        rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
        if (rc < 0)
                return rc;

        *notes_sz = roundup(phdr_sz, PAGE_SIZE);
        *notes_buf = vmcore_alloc_buf(*notes_sz);
        if (!*notes_buf)
                return -ENOMEM;

        rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
        if (rc < 0)
                return rc;

        /* Prepare merged PT_NOTE program header. */
        phdr.p_type    = PT_NOTE;
        phdr.p_flags   = 0;
        note_off = sizeof(Elf32_Ehdr) +
                        (ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
        phdr.p_offset  = roundup(note_off, PAGE_SIZE);
        phdr.p_vaddr   = phdr.p_paddr = 0;
        phdr.p_filesz  = phdr.p_memsz = phdr_sz;
        phdr.p_align   = 0;

        /* Add merged PT_NOTE program header. */
        tmp = elfptr + sizeof(Elf32_Ehdr);
        memcpy(tmp, &phdr, sizeof(phdr));
        tmp += sizeof(phdr);

        /* Remove unwanted PT_NOTE program headers. */
        i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
        *elfsz = *elfsz - i;
        memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf32_Ehdr) - sizeof(Elf32_Phdr)));
        memset(elfptr + *elfsz, 0, i);
        *elfsz = roundup(*elfsz, PAGE_SIZE);

        /* Modify e_phnum to reflect merged headers. */
        ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

        /* Store the size of all notes. We need this to update the note
         * header when the device dumps will be added.
         */
        elfnotes_orig_sz = phdr.p_memsz;

        return 0;
}
/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers. */
static int __init process_ptload_program_headers_elf64(char *elfptr,
                                                size_t elfsz,
                                                size_t elfnotes_sz,
                                                struct list_head *vc_list)
{
        int i;
        Elf64_Ehdr *ehdr_ptr;
        Elf64_Phdr *phdr_ptr;
        loff_t vmcore_off;
        struct vmcore *new;

        ehdr_ptr = (Elf64_Ehdr *)elfptr;
        phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

        /* Skip Elf header, program headers and Elf note segment. */
        vmcore_off = elfsz + elfnotes_sz;

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 paddr, start, end, size;

                if (phdr_ptr->p_type != PT_LOAD)
                        continue;

                paddr = phdr_ptr->p_offset;
                start = rounddown(paddr, PAGE_SIZE);
                end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
                size = end - start;

                /* Add this contiguous chunk of memory to vmcore list. */
                new = get_new_element();
                if (!new)
                        return -ENOMEM;
                new->paddr = start;
                new->size = size;
                list_add_tail(&new->list, vc_list);

                /* Update the program header offset. */
                phdr_ptr->p_offset = vmcore_off + (paddr - start);
                vmcore_off = vmcore_off + size;
        }
        return 0;
}
static int __init process_ptload_program_headers_elf32(char *elfptr,
                                                size_t elfsz,
                                                size_t elfnotes_sz,
                                                struct list_head *vc_list)
{
        int i;
        Elf32_Ehdr *ehdr_ptr;
        Elf32_Phdr *phdr_ptr;
        loff_t vmcore_off;
        struct vmcore *new;

        ehdr_ptr = (Elf32_Ehdr *)elfptr;
        phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

        /* Skip Elf header, program headers and Elf note segment. */
        vmcore_off = elfsz + elfnotes_sz;

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 paddr, start, end, size;

                if (phdr_ptr->p_type != PT_LOAD)
                        continue;

                paddr = phdr_ptr->p_offset;
                start = rounddown(paddr, PAGE_SIZE);
                end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
                size = end - start;

                /* Add this contiguous chunk of memory to vmcore list. */
                new = get_new_element();
                if (!new)
                        return -ENOMEM;
                new->paddr = start;
                new->size = size;
                list_add_tail(&new->list, vc_list);

                /* Update the program header offset. */
                phdr_ptr->p_offset = vmcore_off + (paddr - start);
                vmcore_off = vmcore_off + size;
        }
        return 0;
}
/* Sets offset fields of vmcore elements. */
static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
                                    struct list_head *vc_list)
{
        loff_t vmcore_off;
        struct vmcore *m;

        /* Skip Elf header, program headers and Elf note segment. */
        vmcore_off = elfsz + elfnotes_sz;

        list_for_each_entry(m, vc_list, list) {
                m->offset = vmcore_off;
                vmcore_off += m->size;
        }
}
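
/*
 * Worked example (editor's addition): with the helpers above, the exported
 * /proc/vmcore file is laid out as
 *
 *	[ ELF header + program headers ]  elfcorebuf_sz (page aligned)
 *	[ merged ELF note segment      ]  elfnotes_sz   (page aligned)
 *	[ memory chunk 0               ]  offset = elfcorebuf_sz + elfnotes_sz
 *	[ memory chunk 1               ]  offset = previous offset + size
 *	...
 *
 * Each struct vmcore's offset is the running total computed by
 * set_vmcore_list_offsets(), and the PT_LOAD p_offset fields patched in
 * process_ptload_program_headers_elf64/32() point into those chunks.
 */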
static void free_elfcorebuf(void)
{
        free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
        elfcorebuf = NULL;
        vfree(elfnotes_buf);
        elfnotes_buf = NULL;
}
static int __init parse_crash_elf64_headers(void)
{
        int rc = 0;
        Elf64_Ehdr ehdr;
        u64 addr;

        addr = elfcorehdr_addr;

        /* Read Elf header */
        rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
        if (rc < 0)
                return rc;

        /* Do some basic verification. */
        if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
                (ehdr.e_type != ET_CORE) ||
                !vmcore_elf64_check_arch(&ehdr) ||
                ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
                ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
                ehdr.e_version != EV_CURRENT ||
                ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
                ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
                ehdr.e_phnum == 0) {
                pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }

        /* Read in all elf headers. */
        elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
                                ehdr.e_phnum * sizeof(Elf64_Phdr);
        elfcorebuf_sz = elfcorebuf_sz_orig;
        elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                              get_order(elfcorebuf_sz_orig));
        if (!elfcorebuf)
                return -ENOMEM;
        addr = elfcorehdr_addr;
        rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
        if (rc < 0)
                goto fail;

        /* Merge all PT_NOTE headers into one. */
        rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
                                      &elfnotes_buf, &elfnotes_sz);
        if (rc)
                goto fail;
        rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
                                                  elfnotes_sz, &vmcore_list);
        if (rc)
                goto fail;
        set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
        return 0;
fail:
        free_elfcorebuf();
        return rc;
}
static int __init parse_crash_elf32_headers(void)
{
        int rc = 0;
        Elf32_Ehdr ehdr;
        u64 addr;

        addr = elfcorehdr_addr;

        /* Read Elf header */
        rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
        if (rc < 0)
                return rc;

        /* Do some basic verification. */
        if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
                (ehdr.e_type != ET_CORE) ||
                !vmcore_elf32_check_arch(&ehdr) ||
                ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
                ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
                ehdr.e_version != EV_CURRENT ||
                ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
                ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
                ehdr.e_phnum == 0) {
                pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }

        /* Read in all elf headers. */
        elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
        elfcorebuf_sz = elfcorebuf_sz_orig;
        elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                              get_order(elfcorebuf_sz_orig));
        if (!elfcorebuf)
                return -ENOMEM;
        addr = elfcorehdr_addr;
        rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
        if (rc < 0)
                goto fail;

        /* Merge all PT_NOTE headers into one. */
        rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
                                      &elfnotes_buf, &elfnotes_sz);
        if (rc)
                goto fail;
        rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
                                                  elfnotes_sz, &vmcore_list);
        if (rc)
                goto fail;
        set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
        return 0;
fail:
        free_elfcorebuf();
        return rc;
}
static int __init parse_crash_elf_headers(void)
{
        unsigned char e_ident[EI_NIDENT];
        u64 addr;
        int rc = 0;

        addr = elfcorehdr_addr;
        rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
        if (rc < 0)
                return rc;
        if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
                pr_warn("Warning: Core image elf header not found\n");
                return -EINVAL;
        }

        if (e_ident[EI_CLASS] == ELFCLASS64) {
                rc = parse_crash_elf64_headers();
                if (rc)
                        return rc;
        } else if (e_ident[EI_CLASS] == ELFCLASS32) {
                rc = parse_crash_elf32_headers();
                if (rc)
                        return rc;
        } else {
                pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }

        /* Determine vmcore size. */
        vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
                                      &vmcore_list);

        return 0;
}
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/**
 * vmcoredd_write_header - Write vmcore device dump header at the
 * beginning of the dump's buffer.
 * @buf: Output buffer where the note is written
 * @data: Dump info
 * @size: Size of the dump
 *
 * Fills beginning of the dump's buffer with vmcore device dump header.
 */
static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data,
                                  u32 size)
{
        struct vmcoredd_header *vdd_hdr = (struct vmcoredd_header *)buf;

        vdd_hdr->n_namesz = sizeof(vdd_hdr->name);
        vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name);
        vdd_hdr->n_type = NT_VMCOREDD;

        strncpy((char *)vdd_hdr->name, VMCOREDD_NOTE_NAME,
                sizeof(vdd_hdr->name));
        memcpy(vdd_hdr->dump_name, data->dump_name, sizeof(vdd_hdr->dump_name));
}
/**
 * vmcoredd_update_program_headers - Update all Elf program headers
 * @elfptr: Pointer to elf header
 * @elfnotesz: Size of elf notes aligned to page size
 * @vmcoreddsz: Size of device dumps to be added to elf note header
 *
 * Determine the type of Elf header (Elf64 or Elf32) and update the elf note
 * size. Also update the offsets of all program headers after the elf note
 * header.
 */
static void vmcoredd_update_program_headers(char *elfptr, size_t elfnotesz,
                                            size_t vmcoreddsz)
{
        unsigned char *e_ident = (unsigned char *)elfptr;
        u64 start, end, size;
        loff_t vmcore_off;
        u32 i;

        vmcore_off = elfcorebuf_sz + elfnotesz;

        if (e_ident[EI_CLASS] == ELFCLASS64) {
                Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr;
                Elf64_Phdr *phdr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));

                /* Update all program headers */
                for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
                        if (phdr->p_type == PT_NOTE) {
                                /* Update note size */
                                phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
                                phdr->p_filesz = phdr->p_memsz;
                                continue;
                        }

                        start = rounddown(phdr->p_offset, PAGE_SIZE);
                        end = roundup(phdr->p_offset + phdr->p_memsz,
                                      PAGE_SIZE);
                        size = end - start;
                        phdr->p_offset = vmcore_off + (phdr->p_offset - start);
                        vmcore_off += size;
                }
        } else {
                Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr;
                Elf32_Phdr *phdr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));

                /* Update all program headers */
                for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
                        if (phdr->p_type == PT_NOTE) {
                                /* Update note size */
                                phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
                                phdr->p_filesz = phdr->p_memsz;
                                continue;
                        }

                        start = rounddown(phdr->p_offset, PAGE_SIZE);
                        end = roundup(phdr->p_offset + phdr->p_memsz,
                                      PAGE_SIZE);
                        size = end - start;
                        phdr->p_offset = vmcore_off + (phdr->p_offset - start);
                        vmcore_off += size;
                }
        }
}
/**
 * vmcoredd_update_size - Update the total size of the device dumps and update
 * Elf program headers
 * @dump_size: Size of the current device dump to be added to total size
 *
 * Update the total size of all the device dumps and update the Elf program
 * headers. Calculate the new offsets for the vmcore list and update the
 * total vmcore size.
 */
static void vmcoredd_update_size(size_t dump_size)
{
        vmcoredd_orig_sz += dump_size;
        elfnotes_sz = roundup(elfnotes_orig_sz, PAGE_SIZE) + vmcoredd_orig_sz;
        vmcoredd_update_program_headers(elfcorebuf, elfnotes_sz,
                                        vmcoredd_orig_sz);

        /* Update vmcore list offsets */
        set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);

        vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
                                      &vmcore_list);
        proc_vmcore->size = vmcore_size;
}
/**
 * vmcore_add_device_dump - Add a buffer containing device dump to vmcore
 * @data: dump info
 *
 * Allocate a buffer and invoke the calling driver's dump collect routine.
 * Write an Elf note at the beginning of the buffer to indicate vmcore device
 * dump and add the dump to the global list.
 */
int vmcore_add_device_dump(struct vmcoredd_data *data)
{
        struct vmcoredd_node *dump;
        void *buf = NULL;
        size_t data_size;
        int ret;

        if (!data || !strlen(data->dump_name) ||
            !data->vmcoredd_callback || !data->size)
                return -EINVAL;

        dump = vzalloc(sizeof(*dump));
        if (!dump) {
                ret = -ENOMEM;
                goto out_err;
        }

        /* Keep size of the buffer page aligned so that it can be mmaped */
        data_size = roundup(sizeof(struct vmcoredd_header) + data->size,
                            PAGE_SIZE);

        /* Allocate buffer for drivers to write their dumps */
        buf = vmcore_alloc_buf(data_size);
        if (!buf) {
                ret = -ENOMEM;
                goto out_err;
        }

        vmcoredd_write_header(buf, data, data_size -
                              sizeof(struct vmcoredd_header));

        /* Invoke the driver's dump collection routine */
        ret = data->vmcoredd_callback(data, buf +
                                      sizeof(struct vmcoredd_header));
        if (ret)
                goto out_err;

        dump->buf = buf;
        dump->size = data_size;

        /* Add the dump to driver sysfs list */
        mutex_lock(&vmcoredd_mutex);
        list_add_tail(&dump->list, &vmcoredd_list);
        mutex_unlock(&vmcoredd_mutex);

        vmcoredd_update_size(data_size);
        return 0;

out_err:
        if (buf)
                vfree(buf);
        if (dump)
                vfree(dump);

        return ret;
}
EXPORT_SYMBOL(vmcore_add_device_dump);
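
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a driver that wants its device state included in /proc/vmcore would fill
 * in a vmcoredd_data and register it roughly like this. The driver name,
 * callback body and EXAMPLE_DUMP_LEN are hypothetical; the vmcoredd_data
 * fields and the vmcore_add_device_dump() call are the interface defined
 * above.
 */
#if 0	/* example only */
#define EXAMPLE_DUMP_LEN	(64 * 1024)

static int example_vmcoredd_collect(struct vmcoredd_data *data, void *buf)
{
        /* Copy up to data->size bytes of device state into buf. */
        memset(buf, 0, data->size);	/* stand-in for real collection */
        return 0;
}

static int example_register_device_dump(void)
{
        struct vmcoredd_data data = {};

        strlcpy(data.dump_name, "example_dev", sizeof(data.dump_name));
        data.size = EXAMPLE_DUMP_LEN;
        data.vmcoredd_callback = example_vmcoredd_collect;

        /* Allocates the buffer, writes the ELF note and queues the dump. */
        return vmcore_add_device_dump(&data);
}
#endif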
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Free all dumps in vmcore device dump list */
static void vmcore_free_device_dumps(void)
{
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
        mutex_lock(&vmcoredd_mutex);
        while (!list_empty(&vmcoredd_list)) {
                struct vmcoredd_node *dump;

                dump = list_first_entry(&vmcoredd_list, struct vmcoredd_node,
                                        list);
                list_del(&dump->list);
                vfree(dump->buf);
                vfree(dump);
        }
        mutex_unlock(&vmcoredd_mutex);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
}
/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
        int rc = 0;

        /* Allow architectures to allocate ELF header in 2nd kernel */
        rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
        if (rc)
                return rc;
        /*
         * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
         * then capture the dump.
         */
        if (!(is_vmcore_usable()))
                return rc;
        rc = parse_crash_elf_headers();
        if (rc) {
                pr_warn("Kdump: vmcore not initialized\n");
                return rc;
        }
        elfcorehdr_free(elfcorehdr_addr);
        elfcorehdr_addr = ELFCORE_ADDR_ERR;

        proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
        if (proc_vmcore)
                proc_vmcore->size = vmcore_size;
        return 0;
}
fs_initcall(vmcore_init);
/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
        if (proc_vmcore) {
                proc_remove(proc_vmcore);
                proc_vmcore = NULL;
        }

        /* clear the vmcore list. */
        while (!list_empty(&vmcore_list)) {
                struct vmcore *m;

                m = list_first_entry(&vmcore_list, struct vmcore, list);
                list_del(&m->list);
                kfree(m);
        }
        free_elfcorebuf();

        /* clear vmcore device dump list */
        vmcore_free_device_dumps();
}