Merge branch 'kvm-amd-pmu-fixes' into HEAD
[linux-2.6-microblaze.git] / arch / arm64 / kernel / elfcore.c
1 // SPDX-License-Identifier: GPL-2.0-only
2
3 #include <linux/coredump.h>
4 #include <linux/elfcore.h>
5 #include <linux/kernel.h>
6 #include <linux/mm.h>
7
8 #include <asm/cpufeature.h>
9 #include <asm/mte.h>
10
/*
 * Iterate over every vma of @tsk's mm that has MTE enabled (VM_MTE set).
 * Expands to nothing at all when the CPU lacks MTE support, so a loop
 * body placed after it simply never runs.
 *
 * NOTE(review): the leading unbraced "if" makes this macro hazardous if a
 * caller follows it with an "else" (dangling-else); all callers in this
 * file use it only as a plain statement loop.  It walks tsk->mm->mmap
 * unlocked — presumably safe because the coredump path has quiesced the
 * other users of the mm, but confirm against the coredump locking rules.
 */
#define for_each_mte_vma(tsk, vma)                                      \
        if (system_supports_mte())                                      \
                for (vma = tsk->mm->mmap; vma; vma = vma->vm_next)      \
                        if (vma->vm_flags & VM_MTE)
15
16 static unsigned long mte_vma_tag_dump_size(struct vm_area_struct *vma)
17 {
18         if (vma->vm_flags & VM_DONTDUMP)
19                 return 0;
20
21         return vma_pages(vma) * MTE_PAGE_TAG_STORAGE;
22 }
23
24 /* Derived from dump_user_range(); start/end must be page-aligned */
25 static int mte_dump_tag_range(struct coredump_params *cprm,
26                               unsigned long start, unsigned long end)
27 {
28         int ret = 1;
29         unsigned long addr;
30         void *tags = NULL;
31
32         for (addr = start; addr < end; addr += PAGE_SIZE) {
33                 struct page *page = get_dump_page(addr);
34
35                 /*
36                  * get_dump_page() returns NULL when encountering an empty
37                  * page table entry that would otherwise have been filled with
38                  * the zero page. Skip the equivalent tag dump which would
39                  * have been all zeros.
40                  */
41                 if (!page) {
42                         dump_skip(cprm, MTE_PAGE_TAG_STORAGE);
43                         continue;
44                 }
45
46                 /*
47                  * Pages mapped in user space as !pte_access_permitted() (e.g.
48                  * PROT_EXEC only) may not have the PG_mte_tagged flag set.
49                  */
50                 if (!test_bit(PG_mte_tagged, &page->flags)) {
51                         put_page(page);
52                         dump_skip(cprm, MTE_PAGE_TAG_STORAGE);
53                         continue;
54                 }
55
56                 if (!tags) {
57                         tags = mte_allocate_tag_storage();
58                         if (!tags) {
59                                 put_page(page);
60                                 ret = 0;
61                                 break;
62                         }
63                 }
64
65                 mte_save_page_tags(page_address(page), tags);
66                 put_page(page);
67                 if (!dump_emit(cprm, tags, MTE_PAGE_TAG_STORAGE)) {
68                         mte_free_tag_storage(tags);
69                         ret = 0;
70                         break;
71                 }
72         }
73
74         if (tags)
75                 mte_free_tag_storage(tags);
76
77         return ret;
78 }
79
80 Elf_Half elf_core_extra_phdrs(void)
81 {
82         struct vm_area_struct *vma;
83         int vma_count = 0;
84
85         for_each_mte_vma(current, vma)
86                 vma_count++;
87
88         return vma_count;
89 }
90
91 int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
92 {
93         struct vm_area_struct *vma;
94
95         for_each_mte_vma(current, vma) {
96                 struct elf_phdr phdr;
97
98                 phdr.p_type = PT_ARM_MEMTAG_MTE;
99                 phdr.p_offset = offset;
100                 phdr.p_vaddr = vma->vm_start;
101                 phdr.p_paddr = 0;
102                 phdr.p_filesz = mte_vma_tag_dump_size(vma);
103                 phdr.p_memsz = vma->vm_end - vma->vm_start;
104                 offset += phdr.p_filesz;
105                 phdr.p_flags = 0;
106                 phdr.p_align = 0;
107
108                 if (!dump_emit(cprm, &phdr, sizeof(phdr)))
109                         return 0;
110         }
111
112         return 1;
113 }
114
115 size_t elf_core_extra_data_size(void)
116 {
117         struct vm_area_struct *vma;
118         size_t data_size = 0;
119
120         for_each_mte_vma(current, vma)
121                 data_size += mte_vma_tag_dump_size(vma);
122
123         return data_size;
124 }
125
126 int elf_core_write_extra_data(struct coredump_params *cprm)
127 {
128         struct vm_area_struct *vma;
129
130         for_each_mte_vma(current, vma) {
131                 if (vma->vm_flags & VM_DONTDUMP)
132                         continue;
133
134                 if (!mte_dump_tag_range(cprm, vma->vm_start, vma->vm_end))
135                         return 0;
136         }
137
138         return 1;
139 }