* :c:func:`memblock_set_node`. The :c:func:`memblock_add_node`
* performs such an assignment directly.
*
- * Once memblock is setup the memory can be allocated using either
- * memblock or bootmem APIs.
+ * Once memblock is set up, memory can be allocated using one of the
+ * API variants:
+ *
+ * * :c:func:`memblock_phys_alloc*` - these functions return the
+ * **physical** address of the allocated memory.
+ * * :c:func:`memblock_alloc*` - these functions return the **virtual**
+ * address of the allocated memory.
+ *
+ * Note that both API variants use implicit assumptions about allowed
+ * memory ranges and the fallback methods. Consult the documentation
+ * of the :c:func:`memblock_alloc_internal` and
+ * :c:func:`memblock_alloc_range_nid` functions for a more elaborate
+ * description.
*
* As the system boot progresses, the architecture specific
* :c:func:`mem_init` function frees all the memory to the buddy page
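For context, a minimal sketch of how an early-boot caller might use the two variants documented above. The wrapper function and sizes are hypothetical; memblock_alloc() and memblock_phys_alloc() are the entry points assumed here:

    #include <linux/memblock.h>

    /* Hypothetical early-boot caller, for illustration only. */
    static void __init example_early_setup(void)
    {
        void *table;
        phys_addr_t pa;

        /* memblock_alloc*() returns a zeroed buffer by virtual address */
        table = memblock_alloc(PAGE_SIZE, SMP_CACHE_BYTES);
        if (!table)
            panic("%s: table allocation failed\n", __func__);

        /* memblock_phys_alloc*() returns the physical address instead */
        pa = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
        if (!pa)
            panic("%s: physical allocation failed\n", __func__);
    }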
else
in_slab = &memblock_reserved_in_slab;
- /* Try to find some space for it.
- *
- * WARNING: We assume that either slab_is_available() and we use it or
- * we use MEMBLOCK for allocations. That means that this is unsafe to
- * use when bootmem is currently active (unless bootmem itself is
- * implemented on top of MEMBLOCK which isn't the case yet)
- *
- * This should however not be an issue for now, as we currently only
- * call into MEMBLOCK while it's still active, or much later when slab
- * is active for memory hotplug operations
- */
+ /* Try to find some space for it */
if (use_slab) {
new_array = kmalloc(new_size, GFP_KERNEL);
addr = new_array ? __pa(new_array) : 0;
if (ret)
return ret;
- for (i = start_rgn; i < end_rgn; i++)
+ for (i = start_rgn; i < end_rgn; i++) {
+ struct memblock_region *r = &type->regions[i];
+
if (set)
- memblock_set_region_flags(&type->regions[i], flag);
+ r->flags |= flag;
else
- memblock_clear_region_flags(&type->regions[i], flag);
+ r->flags &= ~flag;
+ }
memblock_merge_regions(type);
return 0;
*idx = ULLONG_MAX;
}
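The simplified set/clear loop above sits behind the memblock_setclr_flag() wrappers; a hedged sketch of how the public helpers would drive it (the address and size are made up):

    /* Illustration: mark a range hotpluggable, then clear the flag again. */
    memblock_mark_hotplug(0x100000000ULL, SZ_1G);  /* r->flags |= MEMBLOCK_HOTPLUG */
    memblock_clear_hotplug(0x100000000ULL, SZ_1G); /* r->flags &= ~MEMBLOCK_HOTPLUG */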
+static bool should_skip_region(struct memblock_region *m, int nid, int flags)
+{
+ int m_nid = memblock_get_region_node(m);
+
+ /* only memory regions are associated with nodes, check it */
+ if (nid != NUMA_NO_NODE && nid != m_nid)
+ return true;
+
+ /* skip hotpluggable memory regions if needed */
+ if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
+ return true;
+
+ /* if we want mirror memory skip non-mirror memory regions */
+ if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
+ return true;
+
+ /* skip nomap memory unless we were asked for it explicitly */
+ if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
+ return true;
+
+ return false;
+}
+
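The new helper consolidates the four checks that were previously open-coded in both iterators below. A small sketch of the kind of caller that reaches it through for_each_free_mem_range() (the accounting is invented):

    phys_addr_t start, end, total_free = 0;
    u64 i;

    /* Each iteration yields a free range that passed should_skip_region(). */
    for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, NULL)
        total_free += end - start;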
/**
- * __next__mem_range - next function for for_each_free_mem_range() etc.
+ * __next_mem_range - next function for for_each_free_mem_range() etc.
* @idx: pointer to u64 loop variable
* @nid: node selector, %NUMA_NO_NODE for all nodes
* @flags: pick from blocks based on memory attributes
phys_addr_t m_end = m->base + m->size;
int m_nid = memblock_get_region_node(m);
- /* only memory regions are associated with nodes, check it */
- if (nid != NUMA_NO_NODE && nid != m_nid)
- continue;
-
- /* skip hotpluggable memory regions if needed */
- if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
- continue;
-
- /* if we want mirror memory skip non-mirror memory regions */
- if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
- continue;
-
- /* skip nomap memory unless we were asked for it explicitly */
- if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
+ if (should_skip_region(m, nid, flags))
continue;
if (!type_b) {
phys_addr_t m_end = m->base + m->size;
int m_nid = memblock_get_region_node(m);
- /* only memory regions are associated with nodes, check it */
- if (nid != NUMA_NO_NODE && nid != m_nid)
- continue;
-
- /* skip hotpluggable memory regions if needed */
- if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
- continue;
-
- /* if we want mirror memory skip non-mirror memory regions */
- if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
- continue;
-
- /* skip nomap memory unless we were asked for it explicitly */
- if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
+ if (should_skip_region(m, nid, flags))
continue;
if (!type_b) {
return found;
}
+/**
+ * memblock_phys_alloc_range - allocate a memory block inside specified range
+ * @size: size of memory block to be allocated in bytes
+ * @align: alignment of the region and block's size
+ * @start: the lower bound of the memory region to allocate (physical address)
+ * @end: the upper bound of the memory region to allocate (physical address)
+ *
+ * Allocate @size bytes in the range between @start and @end.
+ *
+ * Return: physical address of the allocated memory block on success,
+ * %0 on failure.
+ */
phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
phys_addr_t align,
phys_addr_t start,
return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE);
}
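As a usage sketch for the kernel-doc above (the purpose and sizes are invented): allocating a naturally aligned block constrained to lie below 4 GiB:

    /* Hypothetical: a 1 MiB, 1 MiB-aligned block somewhere under 4 GiB. */
    phys_addr_t buf = memblock_phys_alloc_range(SZ_1M, SZ_1M, 0, SZ_4G);

    if (!buf)
        pr_warn("failed to allocate low memory block\n");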
+/**
+ * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
+ * @size: size of memory block to be allocated in bytes
+ * @align: alignment of the region and block's size
+ * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
+ *
+ * Allocates a memory block from the specified NUMA node. If the node
+ * has no available memory, the allocation falls back to any node in
+ * the system.
+ *
+ * Return: physical address of the allocated memory block on success,
+ * %0 on failure.
+ */
phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
return memblock_alloc_range_nid(size, align, 0,
}
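And a matching sketch for the node-local variant (the per-node table is hypothetical):

    /* Hypothetical: prefer node-local placement, fall back to any node. */
    phys_addr_t table = memblock_phys_alloc_try_nid(PAGE_SIZE, PAGE_SIZE, nid);

    if (!table)
        panic("cannot allocate table for node %d\n", nid);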
/**
- * __memblock_free_late - free bootmem block pages directly to buddy allocator
+ * __memblock_free_late - free pages directly to buddy allocator
* @base: phys starting address of the boot memory block
* @size: size of the boot memory block in bytes
*
- * This is only useful when the bootmem allocator has already been torn
+ * This is only useful when the memblock allocator has already been torn
* down, but we are still initializing the system. Pages are released directly
- * to the buddy allocator, no bootmem metadata is updated because it is gone.
+ * to the buddy allocator.
*/
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{