Merge tag 'libata-5.15-2021-09-11' of git://git.kernel.dk/linux-block
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 16f5a36..365cd4a 100644
@@ -86,6 +86,7 @@ static DEFINE_XARRAY(memory_blocks);
  * Memory groups, indexed by memory group id (mgid).
  */
 static DEFINE_XARRAY_FLAGS(memory_groups, XA_FLAGS_ALLOC);
+#define MEMORY_GROUP_MARK_DYNAMIC      XA_MARK_1
 
 static BLOCKING_NOTIFIER_HEAD(memory_chain);
 
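Marking dynamic groups in the XArray at registration time lets later walkers
iterate only the marked entries instead of loading and testing every group.
A minimal sketch of the mark-and-iterate pattern (names and entries are
illustrative, not the kernel code itself):

        /* Tag selected entries, then visit only the tagged ones. */
        static DEFINE_XARRAY_FLAGS(groups, XA_FLAGS_ALLOC);

        static void tag_entry(unsigned long id)
        {
                xa_set_mark(&groups, id, XA_MARK_1);
        }

        static void walk_tagged(void (*fn)(void *entry))
        {
                unsigned long index;
                void *entry;

                /* Unmarked slots are skipped without being visited. */
                xa_for_each_marked(&groups, index, entry, XA_MARK_1)
                        fn(entry);
        }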
@@ -182,7 +183,8 @@ static int memory_block_online(struct memory_block *mem)
        struct zone *zone;
        int ret;
 
-       zone = zone_for_pfn_range(mem->online_type, mem->nid, start_pfn, nr_pages);
+       zone = zone_for_pfn_range(mem->online_type, mem->nid, mem->group,
+                                 start_pfn, nr_pages);
 
        /*
         * Although vmemmap pages have a different lifecycle than the pages
@@ -198,7 +200,7 @@ static int memory_block_online(struct memory_block *mem)
        }
 
        ret = online_pages(start_pfn + nr_vmemmap_pages,
-                          nr_pages - nr_vmemmap_pages, zone);
+                          nr_pages - nr_vmemmap_pages, zone, mem->group);
        if (ret) {
                if (nr_vmemmap_pages)
                        mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
@@ -210,7 +212,7 @@ static int memory_block_online(struct memory_block *mem)
         * now already properly populated.
         */
        if (nr_vmemmap_pages)
-               adjust_present_page_count(pfn_to_page(start_pfn),
+               adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
                                          nr_vmemmap_pages);
 
        return ret;
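The group pointer is threaded through the whole onlining path so that
per-group present-page statistics can be kept alongside the per-zone and
per-node counters. From these call sites, the updated prototypes look like
the following (a sketch reconstructed from the callers; the authoritative
declarations live in include/linux/memory_hotplug.h):

        struct zone *zone_for_pfn_range(int online_type, int nid,
                                        struct memory_group *group,
                                        unsigned long start_pfn,
                                        unsigned long nr_pages);
        int online_pages(unsigned long pfn, unsigned long nr_pages,
                         struct zone *zone, struct memory_group *group);
        void adjust_present_page_count(struct page *page,
                                       struct memory_group *group,
                                       long nr_pages);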
@@ -228,16 +230,16 @@ static int memory_block_offline(struct memory_block *mem)
         * can properly be torn down in offline_pages().
         */
        if (nr_vmemmap_pages)
-               adjust_present_page_count(pfn_to_page(start_pfn),
+               adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
                                          -nr_vmemmap_pages);
 
        ret = offline_pages(start_pfn + nr_vmemmap_pages,
-                           nr_pages - nr_vmemmap_pages);
+                           nr_pages - nr_vmemmap_pages, mem->group);
        if (ret) {
                /* offline_pages() failed. Account back. */
                if (nr_vmemmap_pages)
                        adjust_present_page_count(pfn_to_page(start_pfn),
-                                                 nr_vmemmap_pages);
+                                                 mem->group, nr_vmemmap_pages);
                return ret;
        }
 
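The offlining path mirrors that accounting: vmemmap pages are subtracted
from the group up front and added back if offline_pages() fails, so the
group's counters stay balanced on every exit path. The implied prototype
change, again reconstructed from the call site:

        int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
                          struct memory_group *group);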
@@ -379,12 +381,13 @@ static ssize_t phys_device_show(struct device *dev,
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
 static int print_allowed_zone(char *buf, int len, int nid,
+                             struct memory_group *group,
                              unsigned long start_pfn, unsigned long nr_pages,
                              int online_type, struct zone *default_zone)
 {
        struct zone *zone;
 
-       zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
+       zone = zone_for_pfn_range(online_type, nid, group, start_pfn, nr_pages);
        if (zone == default_zone)
                return 0;
 
@@ -397,9 +400,10 @@ static ssize_t valid_zones_show(struct device *dev,
        struct memory_block *mem = to_memory_block(dev);
        unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
        unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
+       struct memory_group *group = mem->group;
        struct zone *default_zone;
+       int nid = mem->nid;
        int len = 0;
-       int nid;
 
        /*
         * Check the existing zone. Make sure that we do that only on the
@@ -418,14 +422,13 @@ static ssize_t valid_zones_show(struct device *dev,
                goto out;
        }
 
-       nid = mem->nid;
-       default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, start_pfn,
-                                         nr_pages);
+       default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, group,
+                                         start_pfn, nr_pages);
 
        len += sysfs_emit_at(buf, len, "%s", default_zone->name);
-       len += print_allowed_zone(buf, len, nid, start_pfn, nr_pages,
+       len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
                                  MMOP_ONLINE_KERNEL, default_zone);
-       len += print_allowed_zone(buf, len, nid, start_pfn, nr_pages,
+       len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
                                  MMOP_ONLINE_MOVABLE, default_zone);
 out:
        len += sysfs_emit_at(buf, len, "\n");
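valid_zones now derives each candidate zone with the group taken into
account. sysfs_emit_at() appends at a byte offset and returns the number of
bytes written, so successive calls build up one output line safely within
PAGE_SIZE. A sketch of the emit pattern with hypothetical zone names (the
real code prints the default zone first, then any other allowed zones):

        len += sysfs_emit_at(buf, len, "%s", "Normal");
        len += sysfs_emit_at(buf, len, " %s", "Movable");
        len += sysfs_emit_at(buf, len, "\n");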
@@ -583,9 +586,9 @@ static struct memory_block *find_memory_block_by_id(unsigned long block_id)
 /*
  * Called under device_hotplug_lock.
  */
-struct memory_block *find_memory_block(struct mem_section *section)
+struct memory_block *find_memory_block(unsigned long section_nr)
 {
-       unsigned long block_id = memory_block_id(__section_nr(section));
+       unsigned long block_id = memory_block_id(section_nr);
 
        return find_memory_block_by_id(block_id);
 }
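Taking a section number instead of a struct mem_section * spares callers the
__section_nr() reverse lookup, which in some configurations has to search
the mem_section array. A hypothetical updated caller that already holds a
PFN:

        struct memory_block *mem = find_memory_block(pfn_to_section_nr(pfn));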
@@ -937,6 +940,8 @@ static int memory_group_register(struct memory_group group)
        if (ret) {
                kfree(new_group);
                return ret;
+       } else if (group.is_dynamic) {
+               xa_set_mark(&memory_groups, mgid, MEMORY_GROUP_MARK_DYNAMIC);
        }
        return mgid;
 }
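An XArray mark can only be attached to an occupied index, so the dynamic
flag is applied after xa_alloc() has assigned an id for the new group. The
sequence, abridged to its essentials (illustrative; the allocation limit
shown is an assumption and the error handling is elided):

        u32 mgid;

        if (!xa_alloc(&memory_groups, &mgid, new_group, xa_limit_31b,
                      GFP_KERNEL) && new_group->is_dynamic)
                xa_set_mark(&memory_groups, mgid, MEMORY_GROUP_MARK_DYNAMIC);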
@@ -1042,3 +1047,30 @@ struct memory_group *memory_group_find_by_id(int mgid)
 {
        return xa_load(&memory_groups, mgid);
 }
+
+/*
+ * Internal helper, only for use by core memory hotplug code: walk all
+ * dynamic memory groups, excluding a given memory group. The walk can be
+ * restricted to groups on a specific node or span all nodes.
+ */
+int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func,
+                              struct memory_group *excluded, void *arg)
+{
+       struct memory_group *group;
+       unsigned long index;
+       int ret = 0;
+
+       xa_for_each_marked(&memory_groups, index, group,
+                          MEMORY_GROUP_MARK_DYNAMIC) {
+               if (group == excluded)
+                       continue;
+#ifdef CONFIG_NUMA
+               if (nid != NUMA_NO_NODE && group->nid != nid)
+                       continue;
+#endif /* CONFIG_NUMA */
+               ret = func(group, arg);
+               if (ret)
+                       break;
+       }
+       return ret;
+}
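A typical consumer supplies a callback and an accumulator; a non-zero return
from the callback stops the walk and propagates the value. A hypothetical
example that totals the pages of every other dynamic group on a node (the
field names follow struct memory_group as introduced in this series and are
illustrative; nid and my_group are assumed to be in scope):

        static int count_group_pages(struct memory_group *group, void *arg)
        {
                unsigned long *pages = arg;

                *pages += group->present_kernel_pages +
                          group->present_movable_pages;
                return 0;       /* non-zero would abort the walk */
        }

        unsigned long pages = 0;

        walk_dynamic_memory_groups(nid, count_group_pages, my_group, &pages);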