// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) IBM Corporation, 2014, 2017
 * Anton Blanchard, Rashmica Gupta.
 */
7 #define pr_fmt(fmt) "memtrace: " fmt
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/moduleparam.h>
#include <linux/numa.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <asm/debugfs.h>
#include <asm/machdep.h>
23 /* This enables us to keep track of the memory removed from each node. */
24 struct memtrace_entry {
33 static DEFINE_MUTEX(memtrace_mutex);
34 static u64 memtrace_size;
36 static struct memtrace_entry *memtrace_array;
37 static unsigned int memtrace_array_nr;
40 static ssize_t memtrace_read(struct file *filp, char __user *ubuf,
41 size_t count, loff_t *ppos)
43 struct memtrace_entry *ent = filp->private_data;
45 return simple_read_from_buffer(ubuf, count, ppos, ent->mem, ent->size);
48 static const struct file_operations memtrace_fops = {
49 .llseek = default_llseek,
50 .read = memtrace_read,
54 static int check_memblock_online(struct memory_block *mem, void *arg)
56 if (mem->state != MEM_ONLINE)
62 static int change_memblock_state(struct memory_block *mem, void *arg)
64 unsigned long state = (unsigned long)arg;
71 static void memtrace_clear_range(unsigned long start_pfn,
72 unsigned long nr_pages)
77 * As pages are offline, we cannot trust the memmap anymore. As HIGHMEM
78 * does not apply, avoid passing around "struct page" and use
79 * clear_page() instead directly.
81 for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
82 if (IS_ALIGNED(pfn, PAGES_PER_SECTION))
84 clear_page(__va(PFN_PHYS(pfn)));
88 /* called with device_hotplug_lock held */
89 static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages)
91 const unsigned long start = PFN_PHYS(start_pfn);
92 const unsigned long size = PFN_PHYS(nr_pages);
94 if (walk_memory_blocks(start, size, NULL, check_memblock_online))
97 walk_memory_blocks(start, size, (void *)MEM_GOING_OFFLINE,
98 change_memblock_state);
100 if (offline_pages(start_pfn, nr_pages)) {
101 walk_memory_blocks(start, size, (void *)MEM_ONLINE,
102 change_memblock_state);
106 walk_memory_blocks(start, size, (void *)MEM_OFFLINE,
107 change_memblock_state);
113 static u64 memtrace_alloc_node(u32 nid, u64 size)
115 u64 start_pfn, end_pfn, nr_pages, pfn;
117 u64 bytes = memory_block_size_bytes();
119 if (!node_spanned_pages(nid))
122 start_pfn = node_start_pfn(nid);
123 end_pfn = node_end_pfn(nid);
124 nr_pages = size >> PAGE_SHIFT;
126 /* Trace memory needs to be aligned to the size */
127 end_pfn = round_down(end_pfn - nr_pages, nr_pages);
129 lock_device_hotplug();
130 for (base_pfn = end_pfn; base_pfn > start_pfn; base_pfn -= nr_pages) {
131 if (memtrace_offline_pages(nid, base_pfn, nr_pages) == true) {
133 * Clear the range while we still have a linear
136 memtrace_clear_range(base_pfn, nr_pages);
138 * Remove memory in memory block size chunks so that
139 * iomem resources are always split to the same size and
140 * we never try to remove memory that spans two iomem
143 end_pfn = base_pfn + nr_pages;
144 for (pfn = base_pfn; pfn < end_pfn; pfn += bytes>> PAGE_SHIFT) {
145 __remove_memory(nid, pfn << PAGE_SHIFT, bytes);
147 unlock_device_hotplug();
148 return base_pfn << PAGE_SHIFT;
151 unlock_device_hotplug();
156 static int memtrace_init_regions_runtime(u64 size)
161 memtrace_array = kcalloc(num_online_nodes(),
162 sizeof(struct memtrace_entry), GFP_KERNEL);
163 if (!memtrace_array) {
164 pr_err("Failed to allocate memtrace_array\n");
168 for_each_online_node(nid) {
169 m = memtrace_alloc_node(nid, size);
172 * A node might not have any local memory, so warn but
176 pr_err("Failed to allocate trace memory on node %d\n", nid);
180 pr_info("Allocated trace memory on node %d at 0x%016llx\n", nid, m);
182 memtrace_array[memtrace_array_nr].start = m;
183 memtrace_array[memtrace_array_nr].size = size;
184 memtrace_array[memtrace_array_nr].nid = nid;
191 static struct dentry *memtrace_debugfs_dir;
193 static int memtrace_init_debugfs(void)
198 for (i = 0; i < memtrace_array_nr; i++) {
200 struct memtrace_entry *ent = &memtrace_array[i];
202 ent->mem = ioremap(ent->start, ent->size);
203 /* Warn but continue on */
205 pr_err("Failed to map trace memory at 0x%llx\n",
211 snprintf(ent->name, 16, "%08x", ent->nid);
212 dir = debugfs_create_dir(ent->name, memtrace_debugfs_dir);
214 pr_err("Failed to create debugfs directory for node %d\n",
220 debugfs_create_file("trace", 0400, dir, ent, &memtrace_fops);
221 debugfs_create_x64("start", 0400, dir, &ent->start);
222 debugfs_create_x64("size", 0400, dir, &ent->size);
228 static int online_mem_block(struct memory_block *mem, void *arg)
230 return device_online(&mem->dev);
234 * Iterate through the chunks of memory we have removed from the kernel
235 * and attempt to add them back to the kernel.
237 static int memtrace_online(void)
240 struct memtrace_entry *ent;
242 for (i = memtrace_array_nr - 1; i >= 0; i--) {
243 ent = &memtrace_array[i];
245 /* We have onlined this chunk previously */
246 if (ent->nid == NUMA_NO_NODE)
249 /* Remove from io mappings */
255 if (add_memory(ent->nid, ent->start, ent->size)) {
256 pr_err("Failed to add trace memory to node %d\n",
263 * If kernel isn't compiled with the auto online option
264 * we need to online the memory ourselves.
266 if (!memhp_auto_online) {
267 lock_device_hotplug();
268 walk_memory_blocks(ent->start, ent->size, NULL,
270 unlock_device_hotplug();
274 * Memory was added successfully so clean up references to it
275 * so on reentry we can tell that this chunk was added.
277 debugfs_remove_recursive(ent->dir);
278 pr_info("Added trace memory back to node %d\n", ent->nid);
279 ent->size = ent->start = ent->nid = NUMA_NO_NODE;
284 /* If all chunks of memory were added successfully, reset globals */
285 kfree(memtrace_array);
286 memtrace_array = NULL;
288 memtrace_array_nr = 0;
292 static int memtrace_enable_set(void *data, u64 val)
298 * Don't attempt to do anything if size isn't aligned to a memory
299 * block or equal to zero.
301 bytes = memory_block_size_bytes();
302 if (val & (bytes - 1)) {
303 pr_err("Value must be aligned with 0x%llx\n", bytes);
307 mutex_lock(&memtrace_mutex);
309 /* Re-add/online previously removed/offlined memory */
311 if (memtrace_online())
320 /* Offline and remove memory */
321 if (memtrace_init_regions_runtime(val))
324 if (memtrace_init_debugfs())
330 mutex_unlock(&memtrace_mutex);
334 static int memtrace_enable_get(void *data, u64 *val)
336 *val = memtrace_size;
340 DEFINE_SIMPLE_ATTRIBUTE(memtrace_init_fops, memtrace_enable_get,
341 memtrace_enable_set, "0x%016llx\n");
343 static int memtrace_init(void)
345 memtrace_debugfs_dir = debugfs_create_dir("memtrace",
346 powerpc_debugfs_root);
347 if (!memtrace_debugfs_dir)
350 debugfs_create_file("enable", 0600, memtrace_debugfs_dir,
351 NULL, &memtrace_init_fops);
355 machine_device_initcall(powernv, memtrace_init);