// SPDX-License-Identifier: GPL-2.0
/*
 * Memory subsystem support
 *
 * Written by Matt Tolentino <matthew.e.tolentino@intel.com>
 *            Dave Hansen <haveblue@us.ibm.com>
 *
 * This file provides the necessary infrastructure to represent
 * a SPARSEMEM-memory-model system's physical memory in /sysfs.
 * All arch-independent code that assumes MEMORY_HOTPLUG requires
 * SPARSEMEM should be contained here, or in mm/memory_hotplug.c.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/topology.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/xarray.h>

#include <linux/atomic.h>
#include <linux/uaccess.h>

#define MEMORY_CLASS_NAME	"memory"
static const char *const online_type_to_str[] = {
	[MMOP_OFFLINE] = "offline",
	[MMOP_ONLINE] = "online",
	[MMOP_ONLINE_KERNEL] = "online_kernel",
	[MMOP_ONLINE_MOVABLE] = "online_movable",
};

int mhp_online_type_from_str(const char *str)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(online_type_to_str); i++) {
		if (sysfs_streq(str, online_type_to_str[i]))
			return i;
	}
	return -EINVAL;
}
#define to_memory_block(dev) container_of(dev, struct memory_block, dev)

static int sections_per_block;

static inline unsigned long memory_block_id(unsigned long section_nr)
{
	return section_nr / sections_per_block;
}

static inline unsigned long pfn_to_block_id(unsigned long pfn)
{
	return memory_block_id(pfn_to_section_nr(pfn));
}

static inline unsigned long phys_to_block_id(unsigned long phys)
{
	return pfn_to_block_id(PFN_DOWN(phys));
}
static int memory_subsys_online(struct device *dev);
static int memory_subsys_offline(struct device *dev);

static struct bus_type memory_subsys = {
	.name = MEMORY_CLASS_NAME,
	.dev_name = MEMORY_CLASS_NAME,
	.online = memory_subsys_online,
	.offline = memory_subsys_offline,
};

/*
 * Memory blocks are cached in a local radix tree to avoid
 * a costly linear search for the corresponding device on
 * the subsystem bus.
 */
static DEFINE_XARRAY(memory_blocks);

/*
 * Memory groups, indexed by memory group id (mgid).
 */
static DEFINE_XARRAY_FLAGS(memory_groups, XA_FLAGS_ALLOC);

static BLOCKING_NOTIFIER_HEAD(memory_chain);
int register_memory_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&memory_chain, nb);
}
EXPORT_SYMBOL(register_memory_notifier);

void unregister_memory_notifier(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&memory_chain, nb);
}
EXPORT_SYMBOL(unregister_memory_notifier);
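
/*
 * Illustrative sketch (not part of this file): reacting to hotplug events
 * via this notifier chain. The "foo_*" names are placeholders; the callback
 * argument is a struct memory_notify describing the affected PFN range.
 *
 *	static int foo_memory_notify(struct notifier_block *nb,
 *				     unsigned long action, void *arg)
 *	{
 *		struct memory_notify *mn = arg;
 *
 *		// veto offlining while we still use the range
 *		if (action == MEM_GOING_OFFLINE &&
 *		    foo_uses_range(mn->start_pfn, mn->nr_pages))
 *			return notifier_from_errno(-EBUSY);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_memory_nb = {
 *		.notifier_call = foo_memory_notify,
 *	};
 *
 *	register_memory_notifier(&foo_memory_nb);
 */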
static void memory_block_release(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	kfree(mem);
}

unsigned long __weak memory_block_size_bytes(void)
{
	return MIN_MEMORY_BLOCK_SIZE;
}
EXPORT_SYMBOL_GPL(memory_block_size_bytes);
/*
 * Show the first physical section index (number) of this memory block.
 */
static ssize_t phys_index_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long phys_index;

	phys_index = mem->start_section_nr / sections_per_block;

	return sysfs_emit(buf, "%08lx\n", phys_index);
}
/*
 * Legacy interface that we cannot remove. Always indicate "removable"
 * with CONFIG_MEMORY_HOTREMOVE - bad heuristic.
 */
static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	return sysfs_emit(buf, "%d\n", (int)IS_ENABLED(CONFIG_MEMORY_HOTREMOVE));
}
/*
 * online, offline, going offline, etc.
 */
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	const char *output;

	/*
	 * We can probably put these states in a nice little array
	 * so that they're not open-coded.
	 */
	switch (mem->state) {
	case MEM_ONLINE:
		output = "online";
		break;
	case MEM_OFFLINE:
		output = "offline";
		break;
	case MEM_GOING_OFFLINE:
		output = "going-offline";
		break;
	default:
		WARN_ON(1);
		return sysfs_emit(buf, "ERROR-UNKNOWN-%ld\n", mem->state);
	}

	return sysfs_emit(buf, "%s\n", output);
}
int memory_notify(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(&memory_chain, val, v);
}
static int memory_block_online(struct memory_block *mem)
{
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	unsigned long nr_vmemmap_pages = mem->nr_vmemmap_pages;
	struct zone *zone;
	int ret;

	zone = zone_for_pfn_range(mem->online_type, mem->nid, start_pfn, nr_pages);

	/*
	 * Although vmemmap pages have a different lifecycle than the pages
	 * they describe (they remain until the memory is unplugged), doing
	 * their initialization and accounting at memory onlining/offlining
	 * stage helps to keep accounting easier to follow - e.g. vmemmaps
	 * belong to the same zone as the memory they back.
	 */
	if (nr_vmemmap_pages) {
		ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);
		if (ret)
			return ret;
	}

	ret = online_pages(start_pfn + nr_vmemmap_pages,
			   nr_pages - nr_vmemmap_pages, zone);
	if (ret) {
		if (nr_vmemmap_pages)
			mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
		return ret;
	}

	/*
	 * Account once onlining succeeded. If the zone was unpopulated, it is
	 * now already properly populated.
	 */
	if (nr_vmemmap_pages)
		adjust_present_page_count(pfn_to_page(start_pfn),
					  nr_vmemmap_pages);

	return ret;
}
static int memory_block_offline(struct memory_block *mem)
{
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	unsigned long nr_vmemmap_pages = mem->nr_vmemmap_pages;
	int ret;

	/*
	 * Unaccount before offlining, such that unpopulated zone and kthreads
	 * can properly be torn down in offline_pages().
	 */
	if (nr_vmemmap_pages)
		adjust_present_page_count(pfn_to_page(start_pfn),
					  -nr_vmemmap_pages);

	ret = offline_pages(start_pfn + nr_vmemmap_pages,
			    nr_pages - nr_vmemmap_pages);
	if (ret) {
		/* offline_pages() failed. Account back. */
		if (nr_vmemmap_pages)
			adjust_present_page_count(pfn_to_page(start_pfn),
						  nr_vmemmap_pages);
		return ret;
	}

	if (nr_vmemmap_pages)
		mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);

	return ret;
}
/*
 * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
 * OK to have direct references to sparsemem variables in here.
 */
static int
memory_block_action(struct memory_block *mem, unsigned long action)
{
	int ret;

	switch (action) {
	case MEM_ONLINE:
		ret = memory_block_online(mem);
		break;
	case MEM_OFFLINE:
		ret = memory_block_offline(mem);
		break;
	default:
		WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: "
		     "%ld\n", __func__, mem->start_section_nr, action, action);
		ret = -EINVAL;
	}

	return ret;
}
static int memory_block_change_state(struct memory_block *mem,
		unsigned long to_state, unsigned long from_state_req)
{
	int ret = 0;

	if (mem->state != from_state_req)
		return -EINVAL;

	if (to_state == MEM_OFFLINE)
		mem->state = MEM_GOING_OFFLINE;

	ret = memory_block_action(mem, to_state);
	mem->state = ret ? from_state_req : to_state;

	return ret;
}
/* The device lock serializes operations on memory_subsys_[online|offline] */
static int memory_subsys_online(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);
	int ret;

	if (mem->state == MEM_ONLINE)
		return 0;

	/*
	 * When called via device_online() without configuring the online_type,
	 * we want to default to MMOP_ONLINE.
	 */
	if (mem->online_type == MMOP_OFFLINE)
		mem->online_type = MMOP_ONLINE;

	ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
	mem->online_type = MMOP_OFFLINE;

	return ret;
}

static int memory_subsys_offline(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	if (mem->state == MEM_OFFLINE)
		return 0;

	return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
}
static ssize_t state_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	const int online_type = mhp_online_type_from_str(buf);
	struct memory_block *mem = to_memory_block(dev);
	int ret;

	if (online_type < 0)
		return -EINVAL;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	switch (online_type) {
	case MMOP_ONLINE_KERNEL:
	case MMOP_ONLINE_MOVABLE:
	case MMOP_ONLINE:
		/* mem->online_type is protected by device_hotplug_lock */
		mem->online_type = online_type;
		ret = device_online(&mem->dev);
		break;
	case MMOP_OFFLINE:
		ret = device_offline(&mem->dev);
		break;
	default:
		ret = -EINVAL; /* should never happen */
	}

	unlock_device_hotplug();

	if (ret < 0)
		return ret;
	if (ret)
		return -EINVAL;

	return count;
}
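
/*
 * Userspace view of the "state" attribute (block number 32 is just an
 * example):
 *
 *	# cat /sys/devices/system/memory/memory32/state
 *	offline
 *	# echo online_movable > /sys/devices/system/memory/memory32/state
 *	# cat /sys/devices/system/memory/memory32/state
 *	online
 *
 * Writes are parsed by mhp_online_type_from_str() and routed through
 * device_online()/device_offline() under device_hotplug_lock.
 */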
/*
 * Legacy interface that we cannot remove: s390x exposes the storage increment
 * covered by a memory block, allowing for identifying which memory blocks
 * comprise a storage increment. Since a memory block spans complete
 * storage increments nowadays, this interface is basically unused. Other
 * archs never exposed != 0.
 */
static ssize_t phys_device_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);

	return sysfs_emit(buf, "%d\n",
			  arch_get_memory_phys_device(start_pfn));
}
#ifdef CONFIG_MEMORY_HOTREMOVE
static int print_allowed_zone(char *buf, int len, int nid,
			      unsigned long start_pfn, unsigned long nr_pages,
			      int online_type, struct zone *default_zone)
{
	struct zone *zone;

	zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
	if (zone == default_zone)
		return 0;

	return sysfs_emit_at(buf, len, " %s", zone->name);
}

static ssize_t valid_zones_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	struct zone *default_zone;
	int nid = mem->nid;
	int len = 0;

	/*
	 * Check the existing zone. Make sure that we do that only on the
	 * online nodes, otherwise the page_zone is not reliable.
	 */
	if (mem->state == MEM_ONLINE) {
		/*
		 * A block that contains more than one zone can not be
		 * offlined. This can happen e.g. for ZONE_DMA and ZONE_DMA32.
		 */
		default_zone = test_pages_in_a_zone(start_pfn,
						    start_pfn + nr_pages);
		if (!default_zone)
			return sysfs_emit(buf, "%s\n", "none");
		len += sysfs_emit_at(buf, len, "%s", default_zone->name);
		goto out;
	}

	default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, start_pfn,
					  nr_pages);

	len += sysfs_emit_at(buf, len, "%s", default_zone->name);
	len += print_allowed_zone(buf, len, nid, start_pfn, nr_pages,
				  MMOP_ONLINE_KERNEL, default_zone);
	len += print_allowed_zone(buf, len, nid, start_pfn, nr_pages,
				  MMOP_ONLINE_MOVABLE, default_zone);
out:
	len += sysfs_emit_at(buf, len, "\n");
	return len;
}
static DEVICE_ATTR_RO(valid_zones);
#endif
static DEVICE_ATTR_RO(phys_index);
static DEVICE_ATTR_RW(state);
static DEVICE_ATTR_RO(phys_device);
static DEVICE_ATTR_RO(removable);
/*
 * Show the memory block size (shared by all memory blocks).
 */
static ssize_t block_size_bytes_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", memory_block_size_bytes());
}

static DEVICE_ATTR_RO(block_size_bytes);
/*
 * Memory auto online policy.
 */
static ssize_t auto_online_blocks_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n",
			  online_type_to_str[mhp_default_online_type]);
}

static ssize_t auto_online_blocks_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	const int online_type = mhp_online_type_from_str(buf);

	if (online_type < 0)
		return -EINVAL;

	mhp_default_online_type = online_type;
	return count;
}

static DEVICE_ATTR_RW(auto_online_blocks);
/*
 * Some architectures will have custom drivers to do this, and
 * will not need to do it from userspace. The fake hot-add code
 * as well as ppc64 will do all of their discovery in userspace
 * and will require this interface.
 */
#ifdef CONFIG_ARCH_MEMORY_PROBE
static ssize_t probe_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u64 phys_addr;
	int nid, ret;
	unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;

	ret = kstrtoull(buf, 0, &phys_addr);
	if (ret)
		return ret;

	if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
		return -EINVAL;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	nid = memory_add_physaddr_to_nid(phys_addr);
	ret = __add_memory(nid, phys_addr,
			   MIN_MEMORY_BLOCK_SIZE * sections_per_block,
			   MHP_NONE);

	if (ret)
		goto out;

	ret = count;
out:
	unlock_device_hotplug();
	return ret;
}

static DEVICE_ATTR_WO(probe);
#endif
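
/*
 * Userspace view of the "probe" attribute (the address is just an example
 * and must be memory-block aligned and backed by real memory):
 *
 *	# echo 0x100000000 > /sys/devices/system/memory/probe
 *
 * Each write adds exactly one memory block (memory_block_size_bytes());
 * unaligned addresses are rejected with -EINVAL above.
 */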
#ifdef CONFIG_MEMORY_FAILURE
/*
 * Support for offlining pages of memory
 */

/* Soft offline a page */
static ssize_t soft_offline_page_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	int ret;
	u64 pfn;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	ret = soft_offline_page(pfn, 0);
	return ret == 0 ? count : ret;
}

/* Forcibly offline a page, including killing processes. */
static ssize_t hard_offline_page_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	int ret;
	u64 pfn;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	ret = memory_failure(pfn, 0);
	return ret ? ret : count;
}

static DEVICE_ATTR_WO(soft_offline_page);
static DEVICE_ATTR_WO(hard_offline_page);
#endif
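
/*
 * Userspace view (the address is just an example): both attributes take a
 * physical address, which is shifted down to a PFN above.
 *
 *	# echo 0x7f0d8000 > /sys/devices/system/memory/soft_offline_page
 *
 * soft_offline_page() migrates the page's contents away first, whereas
 * hard_offline_page goes through memory_failure() and may kill processes
 * that map the page.
 */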
/* See phys_device_show(). */
int __weak arch_get_memory_phys_device(unsigned long start_pfn)
{
	return 0;
}
/*
 * A reference for the returned memory block device is acquired.
 *
 * Called under device_hotplug_lock.
 */
static struct memory_block *find_memory_block_by_id(unsigned long block_id)
{
	struct memory_block *mem;

	mem = xa_load(&memory_blocks, block_id);
	if (mem)
		get_device(&mem->dev);
	return mem;
}

/*
 * Called under device_hotplug_lock.
 */
struct memory_block *find_memory_block(struct mem_section *section)
{
	unsigned long block_id = memory_block_id(__section_nr(section));

	return find_memory_block_by_id(block_id);
}
static struct attribute *memory_memblk_attrs[] = {
	&dev_attr_phys_index.attr,
	&dev_attr_state.attr,
	&dev_attr_phys_device.attr,
	&dev_attr_removable.attr,
#ifdef CONFIG_MEMORY_HOTREMOVE
	&dev_attr_valid_zones.attr,
#endif
	NULL
};

static const struct attribute_group memory_memblk_attr_group = {
	.attrs = memory_memblk_attrs,
};

static const struct attribute_group *memory_memblk_attr_groups[] = {
	&memory_memblk_attr_group,
	NULL,
};
/*
 * register_memory - Setup a sysfs device for a memory block
 */
static
int register_memory(struct memory_block *memory)
{
	int ret;

	memory->dev.bus = &memory_subsys;
	memory->dev.id = memory->start_section_nr / sections_per_block;
	memory->dev.release = memory_block_release;
	memory->dev.groups = memory_memblk_attr_groups;
	memory->dev.offline = memory->state == MEM_OFFLINE;

	ret = device_register(&memory->dev);
	if (ret) {
		put_device(&memory->dev);
		return ret;
	}
	ret = xa_err(xa_store(&memory_blocks, memory->dev.id, memory,
			      GFP_KERNEL));
	if (ret) {
		put_device(&memory->dev);
		device_unregister(&memory->dev);
	}
	return ret;
}
static int init_memory_block(unsigned long block_id, unsigned long state,
			     unsigned long nr_vmemmap_pages,
			     struct memory_group *group)
{
	struct memory_block *mem;
	int ret = 0;

	mem = find_memory_block_by_id(block_id);
	if (mem) {
		put_device(&mem->dev);
		return -EEXIST;
	}
	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->start_section_nr = block_id * sections_per_block;
	mem->state = state;
	mem->nid = NUMA_NO_NODE;
	mem->nr_vmemmap_pages = nr_vmemmap_pages;
	INIT_LIST_HEAD(&mem->group_next);

	if (group) {
		mem->group = group;
		list_add(&mem->group_next, &group->memory_blocks);
	}

	ret = register_memory(mem);

	return ret;
}
static int add_memory_block(unsigned long base_section_nr)
{
	int section_count = 0;
	unsigned long nr;

	for (nr = base_section_nr; nr < base_section_nr + sections_per_block;
	     nr++)
		if (present_section_nr(nr))
			section_count++;

	if (section_count == 0)
		return 0;
	return init_memory_block(memory_block_id(base_section_nr),
				 MEM_ONLINE, 0, NULL);
}
static void unregister_memory(struct memory_block *memory)
{
	if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys))
		return;

	WARN_ON(xa_erase(&memory_blocks, memory->dev.id) == NULL);

	if (memory->group) {
		list_del(&memory->group_next);
		memory->group = NULL;
	}

	/* drop the ref. we got via find_memory_block() */
	put_device(&memory->dev);
	device_unregister(&memory->dev);
}
/*
 * Create memory block devices for the given memory area. Start and size
 * have to be aligned to memory block granularity. Memory block devices
 * will be initialized as offline.
 *
 * Called under device_hotplug_lock.
 */
int create_memory_block_devices(unsigned long start, unsigned long size,
				unsigned long vmemmap_pages,
				struct memory_group *group)
{
	const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
	unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
	struct memory_block *mem;
	unsigned long block_id;
	int ret = 0;

	if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
			 !IS_ALIGNED(size, memory_block_size_bytes())))
		return -EINVAL;

	for (block_id = start_block_id; block_id != end_block_id; block_id++) {
		ret = init_memory_block(block_id, MEM_OFFLINE, vmemmap_pages,
					group);
		if (ret)
			break;
	}
	if (ret) {
		end_block_id = block_id;
		for (block_id = start_block_id; block_id != end_block_id;
		     block_id++) {
			mem = find_memory_block_by_id(block_id);
			if (WARN_ON_ONCE(!mem))
				continue;
			unregister_memory(mem);
		}
	}
	return ret;
}
/*
 * Remove memory block devices for the given memory area. Start and size
 * have to be aligned to memory block granularity. Memory block devices
 * have to be offline.
 *
 * Called under device_hotplug_lock.
 */
void remove_memory_block_devices(unsigned long start, unsigned long size)
{
	const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
	const unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
	struct memory_block *mem;
	unsigned long block_id;

	if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
			 !IS_ALIGNED(size, memory_block_size_bytes())))
		return;

	for (block_id = start_block_id; block_id != end_block_id; block_id++) {
		mem = find_memory_block_by_id(block_id);
		if (WARN_ON_ONCE(!mem))
			continue;
		unregister_memory_block_under_nodes(mem);
		unregister_memory(mem);
	}
}
/* return true if the memory block is offlined, otherwise, return false */
bool is_memblock_offlined(struct memory_block *mem)
{
	return mem->state == MEM_OFFLINE;
}
static struct attribute *memory_root_attrs[] = {
#ifdef CONFIG_ARCH_MEMORY_PROBE
	&dev_attr_probe.attr,
#endif

#ifdef CONFIG_MEMORY_FAILURE
	&dev_attr_soft_offline_page.attr,
	&dev_attr_hard_offline_page.attr,
#endif

	&dev_attr_block_size_bytes.attr,
	&dev_attr_auto_online_blocks.attr,
	NULL
};

static const struct attribute_group memory_root_attr_group = {
	.attrs = memory_root_attrs,
};

static const struct attribute_group *memory_root_attr_groups[] = {
	&memory_root_attr_group,
	NULL,
};
/*
 * Initialize the sysfs support for memory devices. At the time this function
 * is called, we cannot have concurrent creation/deletion of memory block
 * devices, so the device_hotplug_lock is not needed.
 */
void __init memory_dev_init(void)
{
	int ret;
	unsigned long block_sz, nr;

	/* Validate the configured memory block size */
	block_sz = memory_block_size_bytes();
	if (!is_power_of_2(block_sz) || block_sz < MIN_MEMORY_BLOCK_SIZE)
		panic("Memory block size not suitable: 0x%lx\n", block_sz);
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
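
	/*
	 * Worked example: with the generic memory_block_size_bytes() above,
	 * block_sz == MIN_MEMORY_BLOCK_SIZE (one section; 128 MiB on x86-64)
	 * and sections_per_block == 1. An arch override returning e.g. 2 GiB
	 * gives sections_per_block == 16.
	 */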
	ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
	if (ret)
		panic("%s() failed to register subsystem: %d\n", __func__, ret);

	/*
	 * Create entries for memory sections that were found
	 * during boot and have been initialized.
	 */
	for (nr = 0; nr <= __highest_present_section_nr;
	     nr += sections_per_block) {
		ret = add_memory_block(nr);
		if (ret)
			panic("%s() failed to add memory block: %d\n",
			      __func__, ret);
	}
}
/**
 * walk_memory_blocks - walk through all present memory blocks overlapped
 *			by the range [start, start + size)
 *
 * @start: start address of the memory range
 * @size: size of the memory range
 * @arg: argument passed to func
 * @func: callback for each memory block walked
 *
 * This function walks through all present memory blocks overlapped by the
 * range [start, start + size), calling func on each memory block.
 *
 * In case func() returns an error, walking is aborted and the error is
 * returned.
 *
 * Called under device_hotplug_lock.
 */
int walk_memory_blocks(unsigned long start, unsigned long size,
		       void *arg, walk_memory_blocks_func_t func)
{
	const unsigned long start_block_id = phys_to_block_id(start);
	const unsigned long end_block_id = phys_to_block_id(start + size - 1);
	struct memory_block *mem;
	unsigned long block_id;
	int ret = 0;

	if (!size)
		return 0;

	for (block_id = start_block_id; block_id <= end_block_id; block_id++) {
		mem = find_memory_block_by_id(block_id);
		if (!mem)
			continue;

		ret = func(mem, arg);
		put_device(&mem->dev);
		if (ret)
			break;
	}
	return ret;
}
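
/*
 * Illustrative sketch (not part of this file): counting offline blocks in a
 * physical range. "count_offline_cb" is a placeholder; it must match
 * walk_memory_blocks_func_t, i.e. int (*)(struct memory_block *, void *).
 *
 *	static int count_offline_cb(struct memory_block *mem, void *arg)
 *	{
 *		if (mem->state == MEM_OFFLINE)
 *			(*(unsigned long *)arg)++;
 *		return 0;
 *	}
 *
 *	// with device_hotplug_lock held:
 *	walk_memory_blocks(start, size, &nr_offline, count_offline_cb);
 */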
struct for_each_memory_block_cb_data {
	walk_memory_blocks_func_t func;
	void *arg;
};

static int for_each_memory_block_cb(struct device *dev, void *data)
{
	struct memory_block *mem = to_memory_block(dev);
	struct for_each_memory_block_cb_data *cb_data = data;

	return cb_data->func(mem, cb_data->arg);
}
/**
 * for_each_memory_block - walk through all present memory blocks
 *
 * @arg: argument passed to func
 * @func: callback for each memory block walked
 *
 * This function walks through all present memory blocks, calling func on
 * each memory block.
 *
 * In case func() returns an error, walking is aborted and the error is
 * returned.
 */
int for_each_memory_block(void *arg, walk_memory_blocks_func_t func)
{
	struct for_each_memory_block_cb_data cb_data = {
		.func = func,
		.arg = arg,
	};

	return bus_for_each_dev(&memory_subsys, NULL, &cb_data,
				for_each_memory_block_cb);
}
/*
 * This is an internal helper to unify allocation and initialization of
 * memory groups. Note that the passed memory group will be copied to a
 * dynamically allocated memory group. After this call, the passed
 * memory group should no longer be used.
 */
static int memory_group_register(struct memory_group group)
{
	struct memory_group *new_group;
	uint32_t mgid;
	int ret;

	if (!node_possible(group.nid))
		return -EINVAL;

	new_group = kzalloc(sizeof(group), GFP_KERNEL);
	if (!new_group)
		return -ENOMEM;
	*new_group = group;
	INIT_LIST_HEAD(&new_group->memory_blocks);

	ret = xa_alloc(&memory_groups, &mgid, new_group, xa_limit_31b,
		       GFP_KERNEL);
	if (ret) {
		kfree(new_group);
		return ret;
	}
	return mgid;
}
/**
 * memory_group_register_static() - Register a static memory group.
 * @nid: The node id.
 * @max_pages: The maximum number of pages we'll have in this static memory
 *	       group.
 *
 * Register a new static memory group and return the memory group id.
 * All memory in the group belongs to a single unit, such as a DIMM. All
 * memory belonging to a static memory group is added in one go to be removed
 * in one go -- it's static.
 *
 * Returns an error if out of memory, if the node id is invalid, if no new
 * memory groups can be registered, or if max_pages is invalid (0). Otherwise,
 * returns the new memory group id.
 */
int memory_group_register_static(int nid, unsigned long max_pages)
{
	struct memory_group group = {
		.nid = nid,
		.s = {
			.max_pages = max_pages,
		},
	};

	if (!max_pages)
		return -EINVAL;
	return memory_group_register(group);
}
EXPORT_SYMBOL_GPL(memory_group_register_static);
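
/*
 * Illustrative sketch (not part of this file): a DIMM-like driver would
 * register the group once per device and reuse the mgid for all hotplug
 * operations. "nid" and "dimm_size" are placeholders:
 *
 *	int mgid = memory_group_register_static(nid, PFN_DOWN(dimm_size));
 *
 *	if (mgid < 0)
 *		return mgid;
 *	// add the memory via add_memory() and friends, passing the mgid
 *	// as the nid (see MHP_NID_IS_MGID, introduced alongside memory
 *	// groups); on teardown, remove the memory and then call
 *	// memory_group_unregister(mgid).
 */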
/**
 * memory_group_register_dynamic() - Register a dynamic memory group.
 * @nid: The node id.
 * @unit_pages: Unit in pages in which memory is added/removed in this
 *		dynamic memory group.
 *
 * Register a new dynamic memory group and return the memory group id.
 * Memory within a dynamic memory group is added/removed dynamically
 * in unit_pages.
 *
 * Returns an error if out of memory, if the node id is invalid, if no new
 * memory groups can be registered, or if unit_pages is invalid (0, not a
 * power of two, smaller than a single memory block). Otherwise, returns the
 * new memory group id.
 */
int memory_group_register_dynamic(int nid, unsigned long unit_pages)
{
	struct memory_group group = {
		.nid = nid,
		.is_dynamic = true,
		.d = {
			.unit_pages = unit_pages,
		},
	};

	if (!unit_pages || !is_power_of_2(unit_pages) ||
	    unit_pages < PHYS_PFN(memory_block_size_bytes()))
		return -EINVAL;
	return memory_group_register(group);
}
EXPORT_SYMBOL_GPL(memory_group_register_dynamic);
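
/*
 * Illustrative sketch (not part of this file): a virtio-mem-style device
 * that plugs/unplugs memory in 128 MiB units ("nid" is a placeholder):
 *
 *	int mgid = memory_group_register_dynamic(nid, PFN_DOWN(SZ_128M));
 *
 * unit_pages must be a power of two and cover at least one memory block,
 * as checked above.
 */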
/**
 * memory_group_unregister() - Unregister a memory group.
 * @mgid: the memory group id
 *
 * Unregister a memory group. If any memory block still belongs to this
 * memory group, unregistering will fail.
 *
 * Returns -EINVAL if the memory group id is invalid, returns -EBUSY if some
 * memory blocks still belong to this memory group and returns 0 if
 * unregistering succeeded.
 */
int memory_group_unregister(int mgid)
{
	struct memory_group *group;

	if (mgid < 0)
		return -EINVAL;

	group = xa_load(&memory_groups, mgid);
	if (!group)
		return -EINVAL;
	if (!list_empty(&group->memory_blocks))
		return -EBUSY;
	xa_erase(&memory_groups, mgid);
	kfree(group);
	return 0;
}
EXPORT_SYMBOL_GPL(memory_group_unregister);
/*
 * This is an internal helper only to be used in core memory hotplug code to
 * lookup a memory group. We don't care about locking, as we don't expect a
 * memory group to get unregistered while adding memory to it -- because
 * the group and the memory are managed by the same driver.
 */
struct memory_group *memory_group_find_by_id(int mgid)
{
	return xa_load(&memory_groups, mgid);
}