/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <asm/dma.h>

extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;

/*
 * highest page
 */
extern unsigned long max_pfn;
/*
 * highest possible page
 */
extern unsigned long long max_possible_pfn;

/**
 * enum memblock_flags - definition of memory region attributes
 * @MEMBLOCK_NONE: no special request
 * @MEMBLOCK_HOTPLUG: hotpluggable region
 * @MEMBLOCK_MIRROR: mirrored region
 * @MEMBLOCK_NOMAP: don't add to kernel direct mapping
 */
enum memblock_flags {
	MEMBLOCK_NONE		= 0x0,	/* No special request */
	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
};

/**
 * struct memblock_region - represents a memory region
 * @base: physical address of the region
 * @size: size of the region
 * @flags: memory region attributes
 * @nid: NUMA node id
 */
struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	enum memblock_flags flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	int nid;
#endif
};

/**
 * struct memblock_type - collection of memory regions of certain type
 * @cnt: number of regions
 * @max: size of the allocated array
 * @total_size: size of all regions
 * @regions: array of regions
 * @name: the memory type symbolic name
 */
struct memblock_type {
	unsigned long cnt;
	unsigned long max;
	phys_addr_t total_size;
	struct memblock_region *regions;
	char *name;
};

/**
 * struct memblock - memblock allocator metadata
 * @bottom_up: is bottom up direction?
 * @current_limit: physical address of the current allocation limit
 * @memory: usable memory regions
 * @reserved: reserved memory regions
 * @physmem: all physical memory
 */
struct memblock {
	bool bottom_up;  /* is bottom up direction? */
	phys_addr_t current_limit;
	struct memblock_type memory;
	struct memblock_type reserved;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	struct memblock_type physmem;
#endif
};

extern struct memblock memblock;
extern int memblock_debug;

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
static inline void memblock_discard(void) {}
#endif

#define memblock_dbg(fmt, ...) \
	if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
				   phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
#endif
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
			      phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);

unsigned long memblock_free_all(void);
void reset_node_managed_pages(pg_data_t *pgdat);
void reset_all_zones_managed_pages(void);

/* Low level functions */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
			  struct memblock_type *type_a,
			  struct memblock_type *type_b, phys_addr_t *out_start,
			  phys_addr_t *out_end, int *out_nid);

void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
				phys_addr_t *out_end);

void __memblock_free_late(phys_addr_t base, phys_addr_t size);

/**
 * for_each_mem_range - iterate through memblock areas from type_a and not
 * included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range(i, type_a, type_b, nid, flags,		\
			   p_start, p_end, p_nid)			\
	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, flags, type_a, type_b,		\
			      p_start, p_end, p_nid))
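
/*
 * Illustrative usage sketch (not part of the API; all variable names
 * are hypothetical): walk every range that is in memblock.memory but
 * not in memblock.reserved, i.e. the free memory.
 *
 *	u64 i;
 *	phys_addr_t start, end;
 *
 *	for_each_mem_range(i, &memblock.memory, &memblock.reserved,
 *			   NUMA_NO_NODE, MEMBLOCK_NONE,
 *			   &start, &end, NULL)
 *		pr_info("free: %pa..%pa\n", &start, &end);
 */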

/**
 * for_each_mem_range_rev - reverse iterate through memblock areas from
 * type_a and not included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, type_a, type_b, nid, flags,		\
			       p_start, p_end, p_nid)			\
	for (i = (u64)ULLONG_MAX,					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid))

/**
 * for_each_reserved_mem_region - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_region(i, p_start, p_end)			\
	for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_reserved_mem_region(&i, p_start, p_end))
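
/*
 * Illustrative usage sketch (hypothetical caller): total up all memory
 * that boot-time code has reserved so far.
 *
 *	u64 i;
 *	phys_addr_t start, end, total = 0;
 *
 *	for_each_reserved_mem_region(i, &start, &end)
 *		total += end - start;
 */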

static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_NOMAP;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
			    unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			  unsigned long *out_end_pfn, int *out_nid);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
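
/*
 * Illustrative usage sketch: count the pages configured for one node
 * (nid 0 here is hypothetical); early page-init code walks these ranges
 * in a similar way. Note that @i is an int for this iterator.
 *
 *	unsigned long start_pfn, end_pfn, nr_pages = 0;
 *	int i;
 *
 *	for_each_mem_pfn_range(i, 0, &start_pfn, &end_pfn, NULL)
 *		nr_pages += end_pfn - start_pfn;
 */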
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
				  unsigned long *out_spfn,
				  unsigned long *out_epfn);

/**
 * for_each_free_mem_pfn_range_in_zone - iterate through zone specific free
 * memblock areas
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone. Available once memblock and an empty zone are initialized. The main
 * assumption is that the zone start, end, and pgdat have been associated.
 * This way we can use the zone to determine NUMA node, and if a given part
 * of the memblock is valid for the zone.
 */
#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end)	\
	for (i = 0,							\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end);	\
	     i != U64_MAX;						\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))

/**
 * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific
 * free memblock areas from a given point
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone, continuing from the current position. Available as soon as memblock
 * is initialized.
 */
#define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
	for (; i != U64_MAX;						  \
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock. Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
	for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
			   nid, flags, p_start, p_end, p_nid)
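
/*
 * Illustrative usage sketch: find the first free range big enough for a
 * hypothetical 16MB buffer, on any node.
 *
 *	u64 i;
 *	phys_addr_t start, end;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		if (end - start >= SZ_16M)
 *			break;
 */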

/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order. Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
					p_nid)				\
	for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved,	\
			       nid, flags, p_start, p_end, p_nid)

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);

static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0
#define MEMBLOCK_ALLOC_KASAN		1

/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif

phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
				      phys_addr_t start, phys_addr_t end);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
					      phys_addr_t align)
{
	return memblock_phys_alloc_range(size, align, 0,
					 MEMBLOCK_ALLOC_ACCESSIBLE);
}
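
/*
 * Illustrative sketch: reserve a page-aligned physical range before the
 * page allocator is up (the 64K size is hypothetical). The caller gets
 * a physical address (0 on failure) and must map it itself.
 *
 *	phys_addr_t pa = memblock_phys_alloc(SZ_64K, PAGE_SIZE);
 *
 *	if (!pa)
 *		panic("cannot allocate early buffer\n");
 */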

void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align,
				   phys_addr_t min_addr, phys_addr_t max_addr,
				   int nid);
void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
				 phys_addr_t min_addr, phys_addr_t max_addr,
				 int nid);
void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
			     phys_addr_t min_addr, phys_addr_t max_addr,
			     int nid);

static inline void * __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}
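
/*
 * Illustrative sketch of the common early-boot allocation pattern
 * (struct foo and nr are hypothetical). The returned memory is zeroed
 * and already mapped; it is released with memblock_free_early() or
 * memblock_free_late(), never kfree().
 *
 *	struct foo *tbl;
 *
 *	tbl = memblock_alloc(nr * sizeof(*tbl), SMP_CACHE_BYTES);
 *	if (!tbl)
 *		panic("%s: out of memory\n", __func__);
 */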

static inline void * __init memblock_alloc_raw(phys_addr_t size,
					       phys_addr_t align)
{
	return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
					  MEMBLOCK_ALLOC_ACCESSIBLE,
					  NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_from(phys_addr_t size,
						phys_addr_t align,
						phys_addr_t min_addr)
{
	return memblock_alloc_try_nid(size, align, min_addr,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_low(phys_addr_t size,
					       phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_node(phys_addr_t size,
						phys_addr_t align, int nid)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

static inline void __init memblock_free_early(phys_addr_t base,
					      phys_addr_t size)
{
	memblock_free(base, size);
}

static inline void __init memblock_free_early_nid(phys_addr_t base,
						  phys_addr_t size, int nid)
{
	memblock_free(base, size);
}

static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	__memblock_free_late(base, size);
}

/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline void __init memblock_set_bottom_up(bool enable)
{
	memblock.bottom_up = enable;
}

/*
 * Check if the allocation direction is bottom-up. If this returns true,
 * memblock allocates memory in the bottom-up direction.
 */
static inline bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}
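
/*
 * Illustrative sketch (hypothetical early-boot caller): switch to
 * bottom-up around one allocation so the memory lands near the kernel
 * image, then restore the default top-down direction.
 *
 *	memblock_set_bottom_up(true);
 *	ptr = memblock_alloc(size, SMP_CACHE_BYTES);
 *	memblock_set_bottom_up(false);
 */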

phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
bool memblock_is_map_memory(phys_addr_t addr);
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

extern void __memblock_dump_all(void);

static inline void memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                              limiting allocations to what is currently
 *                              accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);

phys_addr_t memblock_get_current_limit(void);

/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the memory region
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - get the end pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the memory region
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the reserved region
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the reserved region
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base + reg->size);
}
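
/*
 * Worked example of the rounding above (hypothetical values, 4K pages):
 * a reserved region at base 0x1800 with size 0x1000 ends at 0x2800, so
 * memblock_region_reserved_base_pfn() returns PFN_DOWN(0x1800) == 1 and
 * memblock_region_reserved_end_pfn() returns PFN_UP(0x2800) == 3; the
 * pfn range [1, 3) covers every page the region touches. For memory
 * regions the rounding is inverted (PFN_UP of base, PFN_DOWN of end),
 * so only fully contained pages are counted.
 */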

#define for_each_memblock(memblock_type, region)					\
	for (region = memblock.memblock_type.regions;					\
	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt);	\
	     region++)

#define for_each_memblock_type(i, memblock_type, rgn)			\
	for (i = 0, rgn = &memblock_type->regions[0];			\
	     i < memblock_type->cnt;					\
	     i++, rgn = &memblock_type->regions[i])
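
/*
 * Illustrative usage sketch: the first argument of for_each_memblock()
 * names a member of "struct memblock", so "memory" walks usable RAM
 * while "reserved" walks the reservations.
 *
 *	struct memblock_region *r;
 *
 *	for_each_memblock(memory, r)
 *		pr_info("region: base=%pa size=%pa\n", &r->base, &r->size);
 */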

extern void *alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long low_limit,
				     unsigned long high_limit);

#define HASH_EARLY	0x00000001	/* Allocating during early boot? */
#define HASH_SMALL	0x00000002	/* sub-page allocation allowed, min
					 * shift passed via *_hash_shift */
#define HASH_ZERO	0x00000004	/* Zero allocated hash table */
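
/*
 * Illustrative sketch modelled on callers such as the inode cache (all
 * names here are hypothetical): passing 0 for numentries lets the
 * function size the table from total memory, scale 14 asks for roughly
 * one bucket per 2^14 bytes of memory, and the log2 of the final table
 * size comes back via the shift pointer.
 *
 *	static unsigned int demo_shift __initdata;
 *	struct hlist_head *demo_table;
 *
 *	demo_table = alloc_large_system_hash("demo-cache",
 *					     sizeof(struct hlist_head),
 *					     0, 14, HASH_EARLY | HASH_ZERO,
 *					     &demo_shift, NULL, 0, 0);
 */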

/* Only NUMA needs hash distribution. 64bit NUMA architectures have
 * sufficient vmalloc space.
 */
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
extern int hashdist;		/* Distribute hashes across NUMA nodes? */
#else
#define hashdist (0)
#endif

#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */