mm/sparse.c
// SPDX-License-Identifier: GPL-2.0
/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section       - memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section **mem_section;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
        ____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
        return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
        section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
        struct mem_section *section = NULL;
        unsigned long array_size = SECTIONS_PER_ROOT *
                                   sizeof(struct mem_section);

        if (slab_is_available()) {
                section = kzalloc_node(array_size, GFP_KERNEL, nid);
        } else {
                section = memblock_alloc_node(array_size, SMP_CACHE_BYTES,
                                              nid);
                if (!section)
                        panic("%s: Failed to allocate %lu bytes nid=%d\n",
                              __func__, array_size, nid);
        }

        return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
        unsigned long root = SECTION_NR_TO_ROOT(section_nr);
        struct mem_section *section;

        /*
         * An existing section is possible in the sub-section hotplug
         * case. First hot-add instantiates, follow-on hot-add reuses
         * the existing section.
         *
         * The mem_hotplug_lock resolves the apparent race below.
         */
        if (mem_section[root])
                return 0;

        section = sparse_index_alloc(nid);
        if (!section)
                return -ENOMEM;

        mem_section[root] = section;

        return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
        return 0;
}
#endif

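/*
 * Translate a mem_section pointer back to its section number.  With
 * SPARSEMEM_EXTREME the roots are allocated dynamically, so the containing
 * root has to be found by walking the root array; otherwise it is simple
 * pointer arithmetic against the static mem_section[] array.
 */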
#ifdef CONFIG_SPARSEMEM_EXTREME
unsigned long __section_nr(struct mem_section *ms)
{
        unsigned long root_nr;
        struct mem_section *root = NULL;

        for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
                root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
                if (!root)
                        continue;

                if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
                        break;
        }

        VM_BUG_ON(!root);

        return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
#else
unsigned long __section_nr(struct mem_section *ms)
{
        return (unsigned long)(ms - mem_section[0]);
}
#endif

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node.  This keeps us from having to use another data structure.  The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
        return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
        return (section->section_mem_map >> SECTION_NID_SHIFT);
}

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
                                                unsigned long *end_pfn)
{
        unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

        /*
         * Sanity checks - do not allow an architecture to pass
         * in larger pfns than the maximum scope of sparsemem:
         */
        if (*start_pfn > max_sparsemem_pfn) {
                mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
                        "Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
                        *start_pfn, *end_pfn, max_sparsemem_pfn);
                WARN_ON_ONCE(1);
                *start_pfn = max_sparsemem_pfn;
                *end_pfn = max_sparsemem_pfn;
        } else if (*end_pfn > max_sparsemem_pfn) {
                mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
                        "End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
                        *start_pfn, *end_pfn, max_sparsemem_pfn);
                WARN_ON_ONCE(1);
                *end_pfn = max_sparsemem_pfn;
        }
}

/*
 * There are a number of times that we loop over NR_MEM_SECTIONS,
 * looking for section_present() on each.  But, when we have very
 * large physical address spaces, NR_MEM_SECTIONS can also be
 * very large which makes the loops quite long.
 *
 * Keeping track of this gives us an easy way to break out of
 * those loops early.
 */
unsigned long __highest_present_section_nr;
static void section_mark_present(struct mem_section *ms)
{
        unsigned long section_nr = __section_nr(ms);

        if (section_nr > __highest_present_section_nr)
                __highest_present_section_nr = section_nr;

        ms->section_mem_map |= SECTION_MARKED_PRESENT;
}

static inline unsigned long next_present_section_nr(unsigned long section_nr)
{
        do {
                section_nr++;
                if (present_section_nr(section_nr))
                        return section_nr;
        } while ((section_nr <= __highest_present_section_nr));

        return -1;
}
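/*
 * Iterate over all present section numbers, starting at @start.  Relies on
 * __highest_present_section_nr so the walk stops early instead of scanning
 * every possible section number.
 */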
#define for_each_present_section_nr(start, section_nr)          \
        for (section_nr = next_present_section_nr(start-1);     \
             ((section_nr != -1) &&                             \
              (section_nr <= __highest_present_section_nr));    \
             section_nr = next_present_section_nr(section_nr))

static inline unsigned long first_present_section_nr(void)
{
        return next_present_section_nr(-1);
}

void subsection_mask_set(unsigned long *map, unsigned long pfn,
                unsigned long nr_pages)
{
        int idx = subsection_map_index(pfn);
        int end = subsection_map_index(pfn + nr_pages - 1);

        bitmap_set(map, idx, end - idx + 1);
}

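/*
 * Mark the subsections backing [pfn, pfn + nr_pages) as present in the
 * subsection_map of every section the range spans.
 */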
void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
{
        int end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
        unsigned long nr, start_sec = pfn_to_section_nr(pfn);

        if (!nr_pages)
                return;

        for (nr = start_sec; nr <= end_sec; nr++) {
                struct mem_section *ms;
                unsigned long pfns;

                pfns = min(nr_pages, PAGES_PER_SECTION
                                - (pfn & ~PAGE_SECTION_MASK));
                ms = __nr_to_section(nr);
                subsection_mask_set(ms->usage->subsection_map, pfn, pfns);

                pr_debug("%s: sec: %lu pfns: %lu set(%d, %d)\n", __func__, nr,
                                pfns, subsection_map_index(pfn),
                                subsection_map_index(pfn + pfns - 1));

                pfn += pfns;
                nr_pages -= pfns;
        }
}

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
        unsigned long pfn;

#ifdef CONFIG_SPARSEMEM_EXTREME
        if (unlikely(!mem_section)) {
                unsigned long size, align;

                size = sizeof(struct mem_section*) * NR_SECTION_ROOTS;
                align = 1 << (INTERNODE_CACHE_SHIFT);
                mem_section = memblock_alloc(size, align);
                if (!mem_section)
                        panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
                              __func__, size, align);
        }
#endif

        start &= PAGE_SECTION_MASK;
        mminit_validate_memmodel_limits(&start, &end);
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
                unsigned long section = pfn_to_section_nr(pfn);
                struct mem_section *ms;

                sparse_index_init(section, nid);
                set_section_nid(section, nid);

                ms = __nr_to_section(section);
                if (!ms->section_mem_map) {
                        ms->section_mem_map = sparse_encode_early_nid(nid) |
                                                        SECTION_IS_ONLINE;
                        section_mark_present(ms);
                }
        }
}

/*
 * Mark all memblocks as present using memory_present(). This is a
 * convenience function that is useful for a number of arches
 * to mark all of the system's memory as present during initialization.
 */
void __init memblocks_present(void)
{
        struct memblock_region *reg;

        for_each_memblock(memory, reg) {
                memory_present(memblock_get_region_node(reg),
                               memblock_region_memory_base_pfn(reg),
                               memblock_region_memory_end_pfn(reg));
        }
}

/*
 * Subtle: we encode the section's starting pfn into the mem_map pointer,
 * such that the identity "page - section_mem_map" yields the page's actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
        unsigned long coded_mem_map =
                (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
        BUILD_BUG_ON(SECTION_MAP_LAST_BIT > (1UL<<PFN_SECTION_SHIFT));
        BUG_ON(coded_mem_map & ~SECTION_MAP_MASK);
        return coded_mem_map;
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
        /* mask off the extra low bits of information */
        coded_mem_map &= SECTION_MAP_MASK;
        return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}

static void __meminit sparse_init_one_section(struct mem_section *ms,
                unsigned long pnum, struct page *mem_map,
                struct mem_section_usage *usage, unsigned long flags)
{
        ms->section_mem_map &= ~SECTION_MAP_MASK;
        ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum)
                | SECTION_HAS_MEM_MAP | flags;
        ms->usage = usage;
}

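/* Size, in bytes, of a section's pageblock flags bitmap. */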
static unsigned long usemap_size(void)
{
        return BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS) * sizeof(unsigned long);
}

size_t mem_section_usage_size(void)
{
        return sizeof(struct mem_section_usage) + usemap_size();
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                                         unsigned long size)
{
        struct mem_section_usage *usage;
        unsigned long goal, limit;
        int nid;
        /*
         * A page may contain usemaps for other sections preventing the
         * page being freed and making a section unremovable while
         * other sections referencing the usemap remain active. Similarly,
         * a pgdat can prevent a section being removed. If section A
         * contains a pgdat and section B contains the usemap, both
         * sections become inter-dependent. This allocates usemaps
         * from the same section as the pgdat where possible to avoid
         * this problem.
         */
        goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
        limit = goal + (1UL << PA_SECTION_SHIFT);
        nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
        usage = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, goal, limit, nid);
        if (!usage && limit) {
                limit = 0;
                goto again;
        }
        return usage;
}

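/*
 * Report (once per distinct pair) when a node's usage map and its pgdat end
 * up in different sections, since that creates a cross-section dependency
 * that can block memory hot-remove.
 */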
static void __init check_usemap_section_nr(int nid,
                struct mem_section_usage *usage)
{
        unsigned long usemap_snr, pgdat_snr;
        static unsigned long old_usemap_snr;
        static unsigned long old_pgdat_snr;
        struct pglist_data *pgdat = NODE_DATA(nid);
        int usemap_nid;

        /* First call */
        if (!old_usemap_snr) {
                old_usemap_snr = NR_MEM_SECTIONS;
                old_pgdat_snr = NR_MEM_SECTIONS;
        }

        usemap_snr = pfn_to_section_nr(__pa(usage) >> PAGE_SHIFT);
        pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
        if (usemap_snr == pgdat_snr)
                return;

        if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
                /* skip redundant message */
                return;

        old_usemap_snr = usemap_snr;
        old_pgdat_snr = pgdat_snr;

        usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
        if (usemap_nid != nid) {
                pr_info("node %d must be removed before remove section %ld\n",
                        nid, usemap_snr);
                return;
        }
        /*
         * There is a circular dependency.
         * Some platforms allow un-removable sections because they will just
         * gather other removable sections for dynamic partitioning.
         * Just report the un-removable section's number here.
         */
        pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
                usemap_snr, pgdat_snr, nid);
}
#else
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                                         unsigned long size)
{
        return memblock_alloc_node(size, SMP_CACHE_BYTES, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid,
                struct mem_section_usage *usage)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static unsigned long __init section_map_size(void)
{
        return ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE);
}

#else
static unsigned long __init section_map_size(void)
{
        return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
}

struct page __init *__populate_section_memmap(unsigned long pfn,
                unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
        unsigned long size = section_map_size();
        struct page *map = sparse_buffer_alloc(size);
        phys_addr_t addr = __pa(MAX_DMA_ADDRESS);

        if (map)
                return map;

        map = memblock_alloc_try_nid(size,
                                          PAGE_SIZE, addr,
                                          MEMBLOCK_ALLOC_ACCESSIBLE, nid);
        if (!map)
                panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n",
                      __func__, size, PAGE_SIZE, nid, &addr);

        return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

static void *sparsemap_buf __meminitdata;
static void *sparsemap_buf_end __meminitdata;

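/*
 * Pre-allocate one large, page-aligned scratch buffer per node during early
 * sparse_init().  Per-section mem_maps are then carved out of this buffer by
 * sparse_buffer_alloc(), which keeps them physically contiguous on the node;
 * any unused tail is handed back to memblock by sparse_buffer_fini().
 */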
static void __init sparse_buffer_init(unsigned long size, int nid)
{
        phys_addr_t addr = __pa(MAX_DMA_ADDRESS);
        WARN_ON(sparsemap_buf); /* forgot to call sparse_buffer_fini()? */
        sparsemap_buf =
                memblock_alloc_try_nid_raw(size, PAGE_SIZE,
                                                addr,
                                                MEMBLOCK_ALLOC_ACCESSIBLE, nid);
        sparsemap_buf_end = sparsemap_buf + size;
}

static void __init sparse_buffer_fini(void)
{
        unsigned long size = sparsemap_buf_end - sparsemap_buf;

        if (sparsemap_buf && size > 0)
                memblock_free_early(__pa(sparsemap_buf), size);
        sparsemap_buf = NULL;
}

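/*
 * Carve @size bytes out of the current node's scratch buffer, aligned to
 * @size.  Returns NULL if no buffer is set up or the request does not fit,
 * in which case the caller falls back to a fresh memblock allocation.
 */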
void * __meminit sparse_buffer_alloc(unsigned long size)
{
        void *ptr = NULL;

        if (sparsemap_buf) {
                ptr = PTR_ALIGN(sparsemap_buf, size);
                if (ptr + size > sparsemap_buf_end)
                        ptr = NULL;
                else
                        sparsemap_buf = ptr + size;
        }
        return ptr;
}

void __weak __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Initialize sparse on a specific node. The node spans [pnum_begin, pnum_end)
 * and the number of present sections in this node is map_count.
 */
static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
                                   unsigned long pnum_end,
                                   unsigned long map_count)
{
        struct mem_section_usage *usage;
        unsigned long pnum;
        struct page *map;

        usage = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nid),
                        mem_section_usage_size() * map_count);
        if (!usage) {
                pr_err("%s: node[%d] usemap allocation failed", __func__, nid);
                goto failed;
        }
        sparse_buffer_init(map_count * section_map_size(), nid);
        for_each_present_section_nr(pnum_begin, pnum) {
                unsigned long pfn = section_nr_to_pfn(pnum);

                if (pnum >= pnum_end)
                        break;

                map = __populate_section_memmap(pfn, PAGES_PER_SECTION,
                                nid, NULL);
                if (!map) {
                        pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
                               __func__, nid);
                        pnum_begin = pnum;
                        goto failed;
                }
                check_usemap_section_nr(nid, usage);
                sparse_init_one_section(__nr_to_section(pnum), pnum, map, usage,
                                SECTION_IS_EARLY);
                usage = (void *) usage + mem_section_usage_size();
        }
        sparse_buffer_fini();
        return;
failed:
        /* We failed to allocate, mark all the following pnums as not present */
        for_each_present_section_nr(pnum_begin, pnum) {
                struct mem_section *ms;

                if (pnum >= pnum_end)
                        break;
                ms = __nr_to_section(pnum);
                ms->section_mem_map = 0;
        }
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
        unsigned long pnum_begin = first_present_section_nr();
        int nid_begin = sparse_early_nid(__nr_to_section(pnum_begin));
        unsigned long pnum_end, map_count = 1;

        /* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
        set_pageblock_order();

        for_each_present_section_nr(pnum_begin + 1, pnum_end) {
                int nid = sparse_early_nid(__nr_to_section(pnum_end));

                if (nid == nid_begin) {
                        map_count++;
                        continue;
                }
                /* Init node with sections in range [pnum_begin, pnum_end) */
                sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
                nid_begin = nid;
                pnum_begin = pnum_end;
                map_count = 1;
        }
        /* cover the last node */
        sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
        vmemmap_populate_print_last();
}

#ifdef CONFIG_MEMORY_HOTPLUG

/* Mark all memory sections within the pfn range as online */
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn;

        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                unsigned long section_nr = pfn_to_section_nr(pfn);
                struct mem_section *ms;

                /* onlining code should never touch invalid ranges */
                if (WARN_ON(!valid_section_nr(section_nr)))
                        continue;

                ms = __nr_to_section(section_nr);
                ms->section_mem_map |= SECTION_IS_ONLINE;
        }
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/* Mark all memory sections within the pfn range as offline */
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn;

        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                unsigned long section_nr = pfn_to_section_nr(pfn);
                struct mem_section *ms;

                /*
                 * TODO this needs some double checking. Offlining code makes
                 * sure to check pfn_valid but those checks might be just bogus
                 */
                if (WARN_ON(!valid_section_nr(section_nr)))
                        continue;

                ms = __nr_to_section(section_nr);
                ms->section_mem_map &= ~SECTION_IS_ONLINE;
        }
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static struct page *populate_section_memmap(unsigned long pfn,
                unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
        return __populate_section_memmap(pfn, nr_pages, nid, altmap);
}

static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
                struct vmem_altmap *altmap)
{
        unsigned long start = (unsigned long) pfn_to_page(pfn);
        unsigned long end = start + nr_pages * sizeof(struct page);

        vmemmap_free(start, end, altmap);
}
static void free_map_bootmem(struct page *memmap)
{
        unsigned long start = (unsigned long)memmap;
        unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

        vmemmap_free(start, end, NULL);
}
#else
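/*
 * Classic (non-vmemmap) SPARSEMEM: back the memmap for a hot-added section
 * with pages from the buddy allocator, falling back to vmalloc() when a
 * high-order allocation is not available.
 */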
struct page *populate_section_memmap(unsigned long pfn,
                unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
        struct page *page, *ret;
        unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;

        page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
        if (page)
                goto got_map_page;

        ret = vmalloc(memmap_size);
        if (ret)
                goto got_map_ptr;

        return NULL;
got_map_page:
        ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:

        return ret;
}

static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
                struct vmem_altmap *altmap)
{
        struct page *memmap = pfn_to_page(pfn);

        if (is_vmalloc_addr(memmap))
                vfree(memmap);
        else
                free_pages((unsigned long)memmap,
                           get_order(sizeof(struct page) * PAGES_PER_SECTION));
}

static void free_map_bootmem(struct page *memmap)
{
        unsigned long maps_section_nr, removing_section_nr, i;
        unsigned long magic, nr_pages;
        struct page *page = virt_to_page(memmap);

        nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
                >> PAGE_SHIFT;

        for (i = 0; i < nr_pages; i++, page++) {
                magic = (unsigned long) page->freelist;

                BUG_ON(magic == NODE_INFO);

                maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
                removing_section_nr = page_private(page);

                /*
                 * When this function is called, the section being removed is
                 * in a logically offlined state. This means all of its pages
                 * are isolated from the page allocator. If the removing
                 * section's memmap is placed on the same section, it must not
                 * be freed: if it were, the page allocator could hand it out
                 * again, and it will be removed physically soon.
                 */
                if (maps_section_nr != removing_section_nr)
                        put_page_bootmem(page);
        }
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

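/*
 * Tear down the given subsection range: clear its bits in the section's
 * subsection_map, free the usage structure once no subsections remain, and
 * release the backing memmap (via the bootmem path for early sections,
 * otherwise via depopulate_section_memmap()).
 */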
726                 struct vmem_altmap *altmap)
727 {
728         DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
729         DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
730         struct mem_section *ms = __pfn_to_section(pfn);
731         bool section_is_early = early_section(ms);
732         struct page *memmap = NULL;
733         unsigned long *subsection_map = ms->usage
734                 ? &ms->usage->subsection_map[0] : NULL;
735
736         subsection_mask_set(map, pfn, nr_pages);
737         if (subsection_map)
738                 bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);
739
740         if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
741                                 "section already deactivated (%#lx + %ld)\n",
742                                 pfn, nr_pages))
743                 return;
744
745         /*
746          * There are 3 cases to handle across two configurations
747          * (SPARSEMEM_VMEMMAP={y,n}):
748          *
749          * 1/ deactivation of a partial hot-added section (only possible
750          * in the SPARSEMEM_VMEMMAP=y case).
751          *    a/ section was present at memory init
752          *    b/ section was hot-added post memory init
753          * 2/ deactivation of a complete hot-added section
754          * 3/ deactivation of a complete section from memory init
755          *
         * For 1/, when the subsection_map is not empty we will not be
         * freeing the usage map, but still need to free the vmemmap
         * range.
         *
         * For 2/ and 3/ the SPARSEMEM_VMEMMAP={y,n} cases are unified
         */
        bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
        if (bitmap_empty(subsection_map, SUBSECTIONS_PER_SECTION)) {
                unsigned long section_nr = pfn_to_section_nr(pfn);

                if (!section_is_early) {
                        kfree(ms->usage);
                        ms->usage = NULL;
                }
                memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
                ms->section_mem_map = sparse_encode_mem_map(NULL, section_nr);
        }

        if (section_is_early && memmap)
                free_map_bootmem(memmap);
        else
                depopulate_section_memmap(pfn, nr_pages, altmap);
}

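/*
 * Populate the subsection_map and memmap for the range being hot-added.
 * Allocates ms->usage on first use of the section, rejects overlap with
 * already-active subsections, and returns the (possibly pre-existing)
 * memmap for the range, or an ERR_PTR() on failure.
 */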
static struct page * __meminit section_activate(int nid, unsigned long pfn,
                unsigned long nr_pages, struct vmem_altmap *altmap)
{
        DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
        struct mem_section *ms = __pfn_to_section(pfn);
        struct mem_section_usage *usage = NULL;
        unsigned long *subsection_map;
        struct page *memmap;
        int rc = 0;

        subsection_mask_set(map, pfn, nr_pages);

        if (!ms->usage) {
                usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
                if (!usage)
                        return ERR_PTR(-ENOMEM);
                ms->usage = usage;
        }
        subsection_map = &ms->usage->subsection_map[0];

        if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
                rc = -EINVAL;
        else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
                rc = -EEXIST;
        else
                bitmap_or(subsection_map, map, subsection_map,
                                SUBSECTIONS_PER_SECTION);

        if (rc) {
                if (usage)
                        ms->usage = NULL;
                kfree(usage);
                return ERR_PTR(rc);
        }

        /*
         * The early init code does not consider partially populated
         * initial sections, it simply assumes that memory will never be
         * referenced.  If we hot-add memory into such a section then we
         * do not need to populate the memmap and can simply reuse what
         * is already there.
         */
        if (nr_pages < PAGES_PER_SECTION && early_section(ms))
                return pfn_to_page(pfn);

        memmap = populate_section_memmap(pfn, nr_pages, nid, altmap);
        if (!memmap) {
                section_deactivate(pfn, nr_pages, altmap);
                return ERR_PTR(-ENOMEM);
        }

        return memmap;
}

/**
 * sparse_add_section - add a memory section, or populate an existing one
 * @nid: The node to add section on
 * @start_pfn: start pfn of the memory range
 * @nr_pages: number of pfns to add in the section
 * @altmap: device page map
 *
 * This is only intended for hotplug.
 *
 * Return:
 * * 0          - On success.
 * * -EEXIST    - Section is already present.
 * * -ENOMEM    - Out of memory.
 */
int __meminit sparse_add_section(int nid, unsigned long start_pfn,
                unsigned long nr_pages, struct vmem_altmap *altmap)
{
        unsigned long section_nr = pfn_to_section_nr(start_pfn);
        struct mem_section *ms;
        struct page *memmap;
        int ret;

        ret = sparse_index_init(section_nr, nid);
        if (ret < 0)
                return ret;

        memmap = section_activate(nid, start_pfn, nr_pages, altmap);
        if (IS_ERR(memmap))
                return PTR_ERR(memmap);

        /*
         * Poison uninitialized struct pages in order to catch invalid flags
         * combinations.
         */
        page_init_poison(pfn_to_page(start_pfn), sizeof(struct page) * nr_pages);

        ms = __pfn_to_section(start_pfn);
        set_section_nid(section_nr, nid);
        section_mark_present(ms);

        /* Align memmap to section boundary in the subsection case */
        if (section_nr_to_pfn(section_nr) != start_pfn)
                memmap = pfn_to_kaddr(section_nr_to_pfn(section_nr));
        sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0);

        return 0;
}

#ifdef CONFIG_MEMORY_FAILURE
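/*
 * Clear any HWPoison marks left on pages in a section that is being removed,
 * and adjust the global poisoned-page counter accordingly.
 */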
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
        int i;

        if (!memmap)
                return;

        /*
         * A further optimization is to have per section refcounted
         * num_poisoned_pages.  But that would need more space per memmap, so
         * for now just do a quick global check to speed up this routine in the
         * absence of bad pages.
         */
        if (atomic_long_read(&num_poisoned_pages) == 0)
                return;

        for (i = 0; i < nr_pages; i++) {
                if (PageHWPoison(&memmap[i])) {
                        atomic_long_sub(1, &num_poisoned_pages);
                        ClearPageHWPoison(&memmap[i]);
                }
        }
}
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

void sparse_remove_section(struct mem_section *ms, unsigned long pfn,
                unsigned long nr_pages, unsigned long map_offset,
                struct vmem_altmap *altmap)
{
        clear_hwpoisoned_pages(pfn_to_page(pfn) + map_offset,
                        nr_pages - map_offset);
        section_deactivate(pfn, nr_pages, altmap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */