// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) IBM Corporation, 2014, 2017
 * Anton Blanchard, Rashmica Gupta.
 */

#define pr_fmt(fmt) "memtrace: " fmt
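
/*
 * "memtrace" carves contiguous chunks of memory out of each online NUMA
 * node at runtime, removes them from the kernel linear mapping and exposes
 * them through debugfs for use as cache-inhibited hardware trace buffers.
 */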

#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/numa.h>
#include <asm/machdep.h>
#include <asm/debugfs.h>
#include <asm/cacheflush.h>

/* This enables us to keep track of the memory removed from each node. */
struct memtrace_entry {
	void *mem;
	u64 start;
	u64 size;
	u32 nid;
	struct dentry *dir;
	char name[16];
};

static DEFINE_MUTEX(memtrace_mutex);
static u64 memtrace_size;

static struct memtrace_entry *memtrace_array;
static unsigned int memtrace_array_nr;

static ssize_t memtrace_read(struct file *filp, char __user *ubuf,
			     size_t count, loff_t *ppos)
{
	struct memtrace_entry *ent = filp->private_data;

	return simple_read_from_buffer(ubuf, count, ppos, ent->mem, ent->size);
}
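
/*
 * The trace buffer is no longer backed by the kernel linear mapping, so
 * hand it to user space as a cache-inhibited mapping.
 */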
static int memtrace_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct memtrace_entry *ent = filp->private_data;

	if (ent->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (vma->vm_pgoff << PAGE_SHIFT >= ent->size)
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, PHYS_PFN(ent->start) + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
}

static const struct file_operations memtrace_fops = {
	.llseek	= default_llseek,
	.read	= memtrace_read,
	.open	= simple_open,	/* publishes inode->i_private as filp->private_data */
	.mmap	= memtrace_mmap,
};

#define FLUSH_CHUNK_SIZE SZ_1G
/**
 * flush_dcache_range_chunked(): Write any modified data cache blocks out to
 * memory and invalidate them, in chunks of up to FLUSH_CHUNK_SIZE.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * @start: the start address
 * @stop: the stop address (exclusive)
 * @chunk: the max size of the chunks
 */
static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
				       unsigned long chunk)
{
	unsigned long i;

	for (i = start; i < stop; i += chunk) {
		flush_dcache_range(i, min(stop, i + chunk));
		cond_resched();
	}
}

static void memtrace_clear_range(unsigned long start_pfn,
				 unsigned long nr_pages)
{
	unsigned long pfn;

	/* As HIGHMEM does not apply, use clear_page() directly. */
	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		if (IS_ALIGNED(pfn, PAGES_PER_SECTION))
			cond_resched();
		clear_page(__va(PFN_PHYS(pfn)));
	}
	/*
	 * Before we go ahead and use this range as cache inhibited range
	 * flush the cache.
	 */
	flush_dcache_range_chunked((unsigned long)pfn_to_kaddr(start_pfn),
				   (unsigned long)pfn_to_kaddr(start_pfn + nr_pages),
				   FLUSH_CHUNK_SIZE);
}
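
/*
 * Allocate a contiguous, size-aligned chunk on the given node, mark it
 * PageOffline() and remove it from the kernel linear mapping. Returns the
 * physical address of the chunk, or 0 on failure.
 */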
static u64 memtrace_alloc_node(u32 nid, u64 size)
{
	const unsigned long nr_pages = PHYS_PFN(size);
	unsigned long pfn, start_pfn;
	struct page *page;

	/*
	 * Trace memory needs to be aligned to the size, which is guaranteed
	 * by alloc_contig_pages().
	 */
	page = alloc_contig_pages(nr_pages, GFP_KERNEL | __GFP_THISNODE |
				  __GFP_NOWARN, nid, NULL);
	if (!page)
		return 0;
	start_pfn = page_to_pfn(page);

	/*
	 * Clear the range while we still have a linear mapping.
	 *
	 * TODO: use __GFP_ZERO with alloc_contig_pages() once supported.
	 */
	memtrace_clear_range(start_pfn, nr_pages);

	/*
	 * Set pages PageOffline(), to indicate that nobody (e.g., hibernation,
	 * dumping, ...) should be touching these pages.
	 */
	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
		__SetPageOffline(pfn_to_page(pfn));

	arch_remove_linear_mapping(PFN_PHYS(start_pfn), size);

	return PFN_PHYS(start_pfn);
}

static int memtrace_init_regions_runtime(u64 size)
{
	u32 nid;
	u64 m;

	memtrace_array = kcalloc(num_online_nodes(),
				 sizeof(struct memtrace_entry), GFP_KERNEL);
	if (!memtrace_array) {
		pr_err("Failed to allocate memtrace_array\n");
		return -EINVAL;
	}

	for_each_online_node(nid) {
		m = memtrace_alloc_node(nid, size);

		/*
		 * A node might not have any local memory, so warn but
		 * continue on.
		 */
		if (!m) {
			pr_err("Failed to allocate trace memory on node %d\n", nid);
			continue;
		}

		pr_info("Allocated trace memory on node %d at 0x%016llx\n", nid, m);

		memtrace_array[memtrace_array_nr].start = m;
		memtrace_array[memtrace_array_nr].size = size;
		memtrace_array[memtrace_array_nr].nid = nid;
		memtrace_array_nr++;
	}

	return 0;
}

static struct dentry *memtrace_debugfs_dir;

static int memtrace_init_debugfs(void)
{
	int ret = 0;
	int i;

	for (i = 0; i < memtrace_array_nr; i++) {
		struct dentry *dir;
		struct memtrace_entry *ent = &memtrace_array[i];

		ent->mem = ioremap(ent->start, ent->size);
		/* Warn but continue on */
		if (!ent->mem) {
			pr_err("Failed to map trace memory at 0x%llx\n",
			       ent->start);
			ret = -1;
			continue;
		}

		snprintf(ent->name, 16, "%08x", ent->nid);
		dir = debugfs_create_dir(ent->name, memtrace_debugfs_dir);

		ent->dir = dir;
		debugfs_create_file_unsafe("trace", 0600, dir, ent, &memtrace_fops);
		debugfs_create_x64("start", 0400, dir, &ent->start);
		debugfs_create_x64("size", 0400, dir, &ent->size);
	}

	return ret;
}
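
/*
 * Undo memtrace_alloc_node(): restore the linear mapping, clear
 * PageOffline() and hand the pages back to the page allocator.
 */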
static int memtrace_free(int nid, u64 start, u64 size)
{
	struct mhp_params params = { .pgprot = PAGE_KERNEL };
	const unsigned long nr_pages = PHYS_PFN(size);
	const unsigned long start_pfn = PHYS_PFN(start);
	unsigned long pfn;
	int ret;

	ret = arch_create_linear_mapping(nid, start, size, &params);
	if (ret)
		return ret;

	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
		__ClearPageOffline(pfn_to_page(pfn));

	free_contig_range(start_pfn, nr_pages);
	return 0;
}

/*
 * Iterate through the chunks of memory we allocated and attempt to expose
 * them back to the kernel.
 */
static int memtrace_free_regions(void)
{
	int i, ret = 0;
	struct memtrace_entry *ent;

	for (i = memtrace_array_nr - 1; i >= 0; i--) {
		ent = &memtrace_array[i];

		/* We have freed this chunk previously */
		if (ent->nid == NUMA_NO_NODE)
			continue;

		/* Remove from io mappings */
		if (ent->mem) {
			iounmap(ent->mem);
			ent->mem = NULL;
		}

		if (memtrace_free(ent->nid, ent->start, ent->size)) {
			pr_err("Failed to free trace memory on node %d\n",
			       ent->nid);
			ret += 1;
			continue;
		}

		/*
		 * Memory was freed successfully so clean up references to it
		 * so on reentry we can tell that this chunk was freed.
		 */
		debugfs_remove_recursive(ent->dir);
		pr_info("Freed trace memory back on node %d\n", ent->nid);

		ent->size = ent->start = ent->nid = NUMA_NO_NODE;
	}
	if (ret)
		return ret;

	/* If all chunks of memory were freed successfully, reset globals */
	kfree(memtrace_array);
	memtrace_array = NULL;
	memtrace_size = 0;
	memtrace_array_nr = 0;
	return 0;
}
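
/*
 * Writes to the debugfs "enable" file request a trace buffer of @val bytes
 * per node; writing 0 just frees all previously allocated buffers.
 */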
static int memtrace_enable_set(void *data, u64 val)
{
	int rc = -EAGAIN;
	u64 bytes;

	/*
	 * Don't attempt to do anything if size isn't aligned to a memory
	 * block or equal to zero.
	 */
	bytes = memory_block_size_bytes();
	if (val & (bytes - 1)) {
		pr_err("Value must be aligned with 0x%llx\n", bytes);
		return -EINVAL;
	}

	mutex_lock(&memtrace_mutex);

	/* Free all previously allocated memory. */
	if (memtrace_size && memtrace_free_regions())
		goto out_unlock;

	if (!val) {
		rc = 0;
		goto out_unlock;
	}

	/* Allocate memory. */
	if (memtrace_init_regions_runtime(val))
		goto out_unlock;

	if (memtrace_init_debugfs())
		goto out_unlock;

	memtrace_size = val;
	rc = 0;
out_unlock:
	mutex_unlock(&memtrace_mutex);
	return rc;
}

static int memtrace_enable_get(void *data, u64 *val)
{
	*val = memtrace_size;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(memtrace_init_fops, memtrace_enable_get,
			memtrace_enable_set, "0x%016llx\n");
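
/* Create the debugfs "memtrace" directory and its "enable" control at boot. */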
static int memtrace_init(void)
{
	memtrace_debugfs_dir = debugfs_create_dir("memtrace",
						  powerpc_debugfs_root);

	debugfs_create_file("enable", 0600, memtrace_debugfs_dir,
			    NULL, &memtrace_init_fops);

	return 0;
}
machine_device_initcall(powernv, memtrace_init);