// SPDX-License-Identifier: GPL-2.0
/*
 * Memory subsystem support
 *
 * Written by Matt Tolentino <matthew.e.tolentino@intel.com>
 *            Dave Hansen <haveblue@us.ibm.com>
 *
 * This file provides the necessary infrastructure to represent
 * a SPARSEMEM-memory-model system's physical memory in /sysfs.
 * All arch-independent code that assumes MEMORY_HOTPLUG requires
 * SPARSEMEM should be contained here, or in mm/memory_hotplug.c.
 */
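/*
 * Resulting sysfs layout (illustrative sketch): each memory block gets a
 * /sys/devices/system/memory/memoryN/ directory carrying the per-block
 * attributes defined below (phys_index, state, phys_device, removable and,
 * with CONFIG_MEMORY_HOTREMOVE, valid_zones), while probe, block_size_bytes,
 * auto_online_blocks, soft_offline_page and hard_offline_page live directly
 * under /sys/devices/system/memory/.
 */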
#include <linux/module.h>
#include <linux/init.h>
#include <linux/topology.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/stat.h>
#include <linux/slab.h>

#include <linux/atomic.h>
#include <linux/uaccess.h>
static DEFINE_MUTEX(mem_sysfs_mutex);

#define MEMORY_CLASS_NAME	"memory"

#define to_memory_block(dev) container_of(dev, struct memory_block, dev)

static int sections_per_block;
static inline int base_memory_block_id(int section_nr)
{
	return section_nr / sections_per_block;
}
static int memory_subsys_online(struct device *dev);
static int memory_subsys_offline(struct device *dev);

static struct bus_type memory_subsys = {
	.name = MEMORY_CLASS_NAME,
	.dev_name = MEMORY_CLASS_NAME,
	.online = memory_subsys_online,
	.offline = memory_subsys_offline,
};
static BLOCKING_NOTIFIER_HEAD(memory_chain);

int register_memory_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&memory_chain, nb);
}
EXPORT_SYMBOL(register_memory_notifier);

void unregister_memory_notifier(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&memory_chain, nb);
}
EXPORT_SYMBOL(unregister_memory_notifier);
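/*
 * Illustrative sketch (the names below are examples, not part of this file):
 * a hotplug-aware subsystem registers a notifier_block whose callback
 * receives the MEM_* action and a struct memory_notify describing the
 * affected pfn range:
 *
 *	static int example_mem_callback(struct notifier_block *nb,
 *					unsigned long action, void *arg)
 *	{
 *		struct memory_notify *mn = arg;
 *
 *		if (action == MEM_GOING_OFFLINE)
 *			pr_info("offlining pfns [%lx, %lx)\n",
 *				mn->start_pfn, mn->start_pfn + mn->nr_pages);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_mem_nb = {
 *		.notifier_call = example_mem_callback,
 *	};
 *
 *	register_memory_notifier(&example_mem_nb);
 */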
static ATOMIC_NOTIFIER_HEAD(memory_isolate_chain);

int register_memory_isolate_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&memory_isolate_chain, nb);
}
EXPORT_SYMBOL(register_memory_isolate_notifier);

void unregister_memory_isolate_notifier(struct notifier_block *nb)
{
	atomic_notifier_chain_unregister(&memory_isolate_chain, nb);
}
EXPORT_SYMBOL(unregister_memory_isolate_notifier);
static void memory_block_release(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	kfree(mem);
}
unsigned long __weak memory_block_size_bytes(void)
{
	return MIN_MEMORY_BLOCK_SIZE;
}
static unsigned long get_memory_block_size(void)
{
	unsigned long block_sz;

	block_sz = memory_block_size_bytes();

	/* Validate block_sz is a power of 2 and not less than section size */
	if ((block_sz & (block_sz - 1)) || (block_sz < MIN_MEMORY_BLOCK_SIZE)) {
		WARN_ON(1);
		block_sz = MIN_MEMORY_BLOCK_SIZE;
	}

	return block_sz;
}
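/*
 * Worked example (illustrative): with 128 MiB sections (a common
 * MIN_MEMORY_BLOCK_SIZE) and an architecture reporting a 2 GiB block size,
 * memory_dev_init() below computes sections_per_block = 2 GiB / 128 MiB = 16,
 * so each memory block device spans 16 consecutive sections.
 */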
/*
 * Use this as the physical section index that this memsection
 * uses.
 */

static ssize_t phys_index_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long phys_index;

	phys_index = mem->start_section_nr / sections_per_block;
	return sprintf(buf, "%08lx\n", phys_index);
}
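/*
 * Example (illustrative): with sections_per_block = 16, a block whose first
 * section is 32 reports phys_index 00000002, matching its device name
 * memory2 (register_memory() below uses the same division for dev.id).
 */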
/*
 * Show whether the memory block is likely to be hot-removable
 */
static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	unsigned long i, pfn;
	int ret = 1;
	struct memory_block *mem = to_memory_block(dev);

	if (mem->state != MEM_ONLINE)
		goto out;

	for (i = 0; i < sections_per_block; i++) {
		if (!present_section_nr(mem->start_section_nr + i))
			continue;
		pfn = section_nr_to_pfn(mem->start_section_nr + i);
		ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
	}

out:
	return sprintf(buf, "%d\n", ret);
}
/*
 * online, offline, going offline, etc.
 */
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	ssize_t len = 0;

	/*
	 * We can probably put these states in a nice little array
	 * so that they're not open-coded
	 */
	switch (mem->state) {
	case MEM_ONLINE:
		len = sprintf(buf, "online\n");
		break;
	case MEM_OFFLINE:
		len = sprintf(buf, "offline\n");
		break;
	case MEM_GOING_OFFLINE:
		len = sprintf(buf, "going-offline\n");
		break;
	default:
		len = sprintf(buf, "ERROR-UNKNOWN-%ld\n", mem->state);
		WARN_ON(1);
		break;
	}

	return len;
}
int memory_notify(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(&memory_chain, val, v);
}

int memory_isolate_notify(unsigned long val, void *v)
{
	return atomic_notifier_call_chain(&memory_isolate_chain, val, v);
}
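/*
 * Both helpers simply fan the event out to the chains registered above:
 * memory_notify() is invoked from the online/offline paths in
 * mm/memory_hotplug.c with the MEM_GOING_*/MEM_*/MEM_CANCEL_* actions, and
 * memory_isolate_notify() while a page range is being isolated.
 */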
/*
 * The probe routines leave the pages uninitialized, just as the bootmem code
 * does. Make sure we do not access them, but instead use only information
 * from within sections.
 */
static bool pages_correctly_probed(unsigned long start_pfn)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	unsigned long section_nr_end = section_nr + sections_per_block;
	unsigned long pfn = start_pfn;

	/*
	 * memmap between sections is not contiguous except with
	 * SPARSEMEM_VMEMMAP. We look up the page once per section
	 * and assume the memmap is contiguous within each section.
	 */
	for (; section_nr < section_nr_end; section_nr++) {
		if (WARN_ON_ONCE(!pfn_valid(pfn)))
			return false;

		if (!present_section_nr(section_nr)) {
			pr_warn("section %ld pfn[%lx, %lx) not present\n",
				section_nr, pfn, pfn + PAGES_PER_SECTION);
			return false;
		} else if (!valid_section_nr(section_nr)) {
			pr_warn("section %ld pfn[%lx, %lx) no valid memmap\n",
				section_nr, pfn, pfn + PAGES_PER_SECTION);
			return false;
		} else if (online_section_nr(section_nr)) {
			pr_warn("section %ld pfn[%lx, %lx) is already online\n",
				section_nr, pfn, pfn + PAGES_PER_SECTION);
			return false;
		}
		pfn += PAGES_PER_SECTION;
	}

	return true;
}
/*
 * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
 * OK to have direct references to sparsemem variables in here.
 */
static int
memory_block_action(unsigned long phys_index, unsigned long action, int online_type)
{
	unsigned long start_pfn;
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	int ret;

	start_pfn = section_nr_to_pfn(phys_index);

	switch (action) {
	case MEM_ONLINE:
		if (!pages_correctly_probed(start_pfn))
			return -EBUSY;

		ret = online_pages(start_pfn, nr_pages, online_type);
		break;
	case MEM_OFFLINE:
		ret = offline_pages(start_pfn, nr_pages);
		break;
	default:
		WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: %ld\n",
		     __func__, phys_index, action, action);
		ret = -EINVAL;
	}

	return ret;
}
static int memory_block_change_state(struct memory_block *mem,
		unsigned long to_state, unsigned long from_state_req)
{
	int ret = 0;

	if (mem->state != from_state_req)
		return -EINVAL;

	if (to_state == MEM_OFFLINE)
		mem->state = MEM_GOING_OFFLINE;

	ret = memory_block_action(mem->start_section_nr, to_state,
				  mem->online_type);

	mem->state = ret ? from_state_req : to_state;

	return ret;
}
/* The device lock serializes operations on memory_subsys_[online|offline] */
static int memory_subsys_online(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);
	int ret;

	if (mem->state == MEM_ONLINE)
		return 0;

	/*
	 * If we are called from state_store(), online_type will be
	 * set >= 0. Otherwise we were called from the device online
	 * attribute and need to set the online_type.
	 */
	if (mem->online_type < 0)
		mem->online_type = MMOP_ONLINE_KEEP;

	ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);

	/* clear online_type */
	mem->online_type = -1;

	return ret;
}
static int memory_subsys_offline(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	if (mem->state == MEM_OFFLINE)
		return 0;

	/* Can't offline a block with non-present sections */
	if (mem->section_count != sections_per_block)
		return -EINVAL;

	return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
}
static ssize_t state_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct memory_block *mem = to_memory_block(dev);
	int ret, online_type;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	if (sysfs_streq(buf, "online_kernel"))
		online_type = MMOP_ONLINE_KERNEL;
	else if (sysfs_streq(buf, "online_movable"))
		online_type = MMOP_ONLINE_MOVABLE;
	else if (sysfs_streq(buf, "online"))
		online_type = MMOP_ONLINE_KEEP;
	else if (sysfs_streq(buf, "offline"))
		online_type = MMOP_OFFLINE;
	else {
		ret = -EINVAL;
		goto err;
	}

	switch (online_type) {
	case MMOP_ONLINE_KERNEL:
	case MMOP_ONLINE_MOVABLE:
	case MMOP_ONLINE_KEEP:
		/* mem->online_type is protected by device_hotplug_lock */
		mem->online_type = online_type;
		ret = device_online(&mem->dev);
		break;
	case MMOP_OFFLINE:
		ret = device_offline(&mem->dev);
		break;
	default:
		ret = -EINVAL; /* should never happen */
	}

err:
	unlock_device_hotplug();

	if (ret < 0)
		return ret;
	if (ret)
		return -EINVAL;

	return count;
}
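/*
 * Example usage from userspace (illustrative):
 *	echo online         > /sys/devices/system/memory/memoryN/state
 *	echo online_movable > /sys/devices/system/memory/memoryN/state
 *	echo offline        > /sys/devices/system/memory/memoryN/state
 * The strings accepted above map to the MMOP_* online types.
 */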
/*
 * phys_device is a bad name for this. What I really want
 * is a way to differentiate between memory ranges that
 * are part of physical devices that constitute
 * a complete removable unit or FRU (field-replaceable unit),
 * i.e. do these ranges belong to the same physical device,
 * such that if I offline all of these sections I can then
 * remove the physical device?
 */
static ssize_t phys_device_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);

	return sprintf(buf, "%d\n", mem->phys_device);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
static void print_allowed_zone(char *buf, int nid, unsigned long start_pfn,
			       unsigned long nr_pages, int online_type,
			       struct zone *default_zone)
{
	struct zone *zone;

	zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
	if (zone != default_zone) {
		strcat(buf, " ");
		strcat(buf, zone->name);
	}
}
static ssize_t valid_zones_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	unsigned long valid_start_pfn, valid_end_pfn;
	struct zone *default_zone;
	int nid;

	/*
	 * Check the existing zone. Make sure that we do that only on
	 * online nodes, otherwise page_zone() is not reliable.
	 */
	if (mem->state == MEM_ONLINE) {
		/*
		 * A block that contains more than one zone can not be
		 * offlined. This can happen e.g. for ZONE_DMA and ZONE_DMA32.
		 */
		if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages,
					  &valid_start_pfn, &valid_end_pfn))
			return sprintf(buf, "none\n");
		start_pfn = valid_start_pfn;
		strcat(buf, page_zone(pfn_to_page(start_pfn))->name);
		goto out;
	}

	nid = mem->nid;
	default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages);
	strcat(buf, default_zone->name);

	print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_KERNEL,
			   default_zone);
	print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_MOVABLE,
			   default_zone);
out:
	strcat(buf, "\n");

	return strlen(buf);
}
static DEVICE_ATTR_RO(valid_zones);
#endif
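/*
 * Example output (illustrative, with CONFIG_MEMORY_HOTREMOVE): reading
 * valid_zones for an offline block typically yields "Normal Movable"
 * (default zone first, alternatives after), for an online block the name of
 * the zone it currently sits in, or "none" if the block spans multiple zones
 * and therefore cannot be offlined.
 */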
static DEVICE_ATTR_RO(phys_index);
static DEVICE_ATTR_RW(state);
static DEVICE_ATTR_RO(phys_device);
static DEVICE_ATTR_RO(removable);
/*
 * Block size attribute stuff
 */
static ssize_t block_size_bytes_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lx\n", get_memory_block_size());
}

static DEVICE_ATTR_RO(block_size_bytes);
/*
 * Memory auto online policy.
 */

static ssize_t auto_online_blocks_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	if (memhp_auto_online)
		return sprintf(buf, "online\n");
	else
		return sprintf(buf, "offline\n");
}

static ssize_t auto_online_blocks_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	if (sysfs_streq(buf, "online"))
		memhp_auto_online = true;
	else if (sysfs_streq(buf, "offline"))
		memhp_auto_online = false;
	else
		return -EINVAL;

	return count;
}

static DEVICE_ATTR_RW(auto_online_blocks);
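/*
 * Example usage from userspace (illustrative):
 *	echo online  > /sys/devices/system/memory/auto_online_blocks
 *	echo offline > /sys/devices/system/memory/auto_online_blocks
 * controls whether newly added memory blocks are onlined automatically.
 */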
/*
 * Some architectures will have custom drivers to do this, and
 * will not need to do it from userspace. The fake hot-add code
 * as well as ppc64 will do all of their discovery in userspace
 * and will require this interface.
 */
#ifdef CONFIG_ARCH_MEMORY_PROBE
static ssize_t probe_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u64 phys_addr;
	int nid, ret;
	unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;

	ret = kstrtoull(buf, 0, &phys_addr);
	if (ret)
		return ret;

	if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
		return -EINVAL;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	nid = memory_add_physaddr_to_nid(phys_addr);
	ret = __add_memory(nid, phys_addr,
			   MIN_MEMORY_BLOCK_SIZE * sections_per_block);
	if (ret)
		goto out;

	ret = count;
out:
	unlock_device_hotplug();
	return ret;
}

static DEVICE_ATTR_WO(probe);
#endif
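/*
 * Example usage from userspace (illustrative, with CONFIG_ARCH_MEMORY_PROBE):
 * writing a physical address, aligned to the memory block size, hot-adds the
 * block that starts there, e.g.
 *	echo 0x100000000 > /sys/devices/system/memory/probe
 */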
#ifdef CONFIG_MEMORY_FAILURE
/*
 * Support for offlining pages of memory
 */

/* Soft offline a page */
static ssize_t soft_offline_page_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	int ret;
	u64 pfn;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	if (!pfn_valid(pfn))
		return -ENXIO;
	ret = soft_offline_page(pfn_to_page(pfn), 0);
	return ret == 0 ? count : ret;
}

/* Forcibly offline a page, including killing processes. */
static ssize_t hard_offline_page_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	int ret;
	u64 pfn;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	ret = memory_failure(pfn, 0);
	return ret ? ret : count;
}

static DEVICE_ATTR_WO(soft_offline_page);
static DEVICE_ATTR_WO(hard_offline_page);
#endif
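/*
 * Example usage from userspace (illustrative, with CONFIG_MEMORY_FAILURE):
 * both files take a physical address (the stores above shift it right by
 * PAGE_SHIFT to obtain the pfn), e.g.
 *	echo 0x200000000 > /sys/devices/system/memory/soft_offline_page
 *	echo 0x200000000 > /sys/devices/system/memory/hard_offline_page
 */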
/*
 * Note that phys_device is optional. It is here to allow for
 * differentiation between which *physical* devices each
 * section belongs to...
 */
int __weak arch_get_memory_phys_device(unsigned long start_pfn)
{
	return 0;
}
/*
 * A reference for the returned object is held and the reference for the
 * hinted object is released.
 */
struct memory_block *find_memory_block_hinted(struct mem_section *section,
					      struct memory_block *hint)
{
	int block_id = base_memory_block_id(__section_nr(section));
	struct device *hintdev = hint ? &hint->dev : NULL;
	struct device *dev;

	dev = subsys_find_device_by_id(&memory_subsys, block_id, hintdev);
	if (hint)
		put_device(&hint->dev);
	if (!dev)
		return NULL;
	return to_memory_block(dev);
}
/*
 * For now, we have a linear search to go find the appropriate
 * memory_block corresponding to a particular phys_index. If
 * this gets to be a real problem, we can always use a radix
 * tree or something here.
 *
 * This could be made generic for all device subsystems.
 */
struct memory_block *find_memory_block(struct mem_section *section)
{
	return find_memory_block_hinted(section, NULL);
}
static struct attribute *memory_memblk_attrs[] = {
	&dev_attr_phys_index.attr,
	&dev_attr_state.attr,
	&dev_attr_phys_device.attr,
	&dev_attr_removable.attr,
#ifdef CONFIG_MEMORY_HOTREMOVE
	&dev_attr_valid_zones.attr,
#endif
	NULL
};

static struct attribute_group memory_memblk_attr_group = {
	.attrs = memory_memblk_attrs,
};

static const struct attribute_group *memory_memblk_attr_groups[] = {
	&memory_memblk_attr_group,
	NULL,
};
/*
 * register_memory - Setup a sysfs device for a memory block
 */
static int register_memory(struct memory_block *memory)
{
	int ret;

	memory->dev.bus = &memory_subsys;
	memory->dev.id = memory->start_section_nr / sections_per_block;
	memory->dev.release = memory_block_release;
	memory->dev.groups = memory_memblk_attr_groups;
	memory->dev.offline = memory->state == MEM_OFFLINE;

	ret = device_register(&memory->dev);
	if (ret)
		put_device(&memory->dev);

	return ret;
}
static int init_memory_block(struct memory_block **memory,
			     struct mem_section *section, unsigned long state)
{
	struct memory_block *mem;
	unsigned long start_pfn;
	int scn_nr;
	int ret = 0;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	scn_nr = __section_nr(section);
	mem->start_section_nr =
			base_memory_block_id(scn_nr) * sections_per_block;
	mem->end_section_nr = mem->start_section_nr + sections_per_block - 1;
	mem->state = state;
	start_pfn = section_nr_to_pfn(mem->start_section_nr);
	mem->phys_device = arch_get_memory_phys_device(start_pfn);

	ret = register_memory(mem);

	*memory = mem;
	return ret;
}
static int add_memory_block(int base_section_nr)
{
	struct memory_block *mem;
	int i, ret, section_count = 0, section_nr;

	for (i = base_section_nr;
	     i < base_section_nr + sections_per_block;
	     i++) {
		if (!present_section_nr(i))
			continue;
		if (section_count == 0)
			section_nr = i;
		section_count++;
	}

	if (section_count == 0)
		return 0;
	ret = init_memory_block(&mem, __nr_to_section(section_nr), MEM_ONLINE);
	if (ret)
		return ret;
	mem->section_count = section_count;
	return 0;
}
/*
 * We need an interface for the VM to add new memory regions,
 * but without onlining them.
 */
int hotplug_memory_register(int nid, struct mem_section *section)
{
	int ret = 0;
	struct memory_block *mem;

	mutex_lock(&mem_sysfs_mutex);

	mem = find_memory_block(section);
	if (mem) {
		mem->section_count++;
		put_device(&mem->dev);
	} else {
		ret = init_memory_block(&mem, section, MEM_OFFLINE);
		if (ret)
			goto out;
		mem->section_count++;
	}

out:
	mutex_unlock(&mem_sysfs_mutex);
	return ret;
}
#ifdef CONFIG_MEMORY_HOTREMOVE
static void
unregister_memory(struct memory_block *memory)
{
	BUG_ON(memory->dev.bus != &memory_subsys);

	/* drop the ref. we got in remove_memory_section() */
	put_device(&memory->dev);
	device_unregister(&memory->dev);
}
static int remove_memory_section(unsigned long node_id,
				 struct mem_section *section, int phys_device)
{
	struct memory_block *mem;

	mutex_lock(&mem_sysfs_mutex);

	/*
	 * Some users of memory hotplug do not want/need memblock to
	 * track all sections. Skip over those.
	 */
	mem = find_memory_block(section);
	if (!mem)
		goto out_unlock;

	unregister_mem_sect_under_nodes(mem, __section_nr(section));

	mem->section_count--;
	if (mem->section_count == 0)
		unregister_memory(mem);
	else
		put_device(&mem->dev);

out_unlock:
	mutex_unlock(&mem_sysfs_mutex);
	return 0;
}

int unregister_memory_section(struct mem_section *section)
{
	if (!present_section(section))
		return -EINVAL;

	return remove_memory_section(0, section, 0);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
/* return true if the memory block is offlined, otherwise, return false */
bool is_memblock_offlined(struct memory_block *mem)
{
	return mem->state == MEM_OFFLINE;
}
static struct attribute *memory_root_attrs[] = {
#ifdef CONFIG_ARCH_MEMORY_PROBE
	&dev_attr_probe.attr,
#endif

#ifdef CONFIG_MEMORY_FAILURE
	&dev_attr_soft_offline_page.attr,
	&dev_attr_hard_offline_page.attr,
#endif

	&dev_attr_block_size_bytes.attr,
	&dev_attr_auto_online_blocks.attr,
	NULL
};

static struct attribute_group memory_root_attr_group = {
	.attrs = memory_root_attrs,
};

static const struct attribute_group *memory_root_attr_groups[] = {
	&memory_root_attr_group,
	NULL,
};
/*
 * Initialize the sysfs support for memory devices...
 */
int __init memory_dev_init(void)
{
	unsigned int i;
	int ret;
	int err;
	unsigned long block_sz;

	ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
	if (ret)
		goto out;

	block_sz = get_memory_block_size();
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;

	/*
	 * Create entries for memory sections that were found
	 * during boot and have been initialized.
	 */
	mutex_lock(&mem_sysfs_mutex);
	for (i = 0; i <= __highest_present_section_nr;
	     i += sections_per_block) {
		err = add_memory_block(i);
		if (!ret)
			ret = err;
	}
	mutex_unlock(&mem_sysfs_mutex);

out:
	if (ret)
		printk(KERN_ERR "%s() failed: %d\n", __func__, ret);
	return ret;
}