1 // SPDX-License-Identifier: GPL-2.0-only
3 * mm/percpu.c - percpu memory allocator
5 * Copyright (C) 2009 SUSE Linux Products GmbH
6 * Copyright (C) 2009 Tejun Heo <tj@kernel.org>
8 * Copyright (C) 2017 Facebook Inc.
9 * Copyright (C) 2017 Dennis Zhou <dennis@kernel.org>
11 * The percpu allocator handles both static and dynamic areas. Percpu
12 * areas are allocated in chunks which are divided into units. There is
13 * a 1-to-1 mapping for units to possible cpus. These units are grouped
14 * based on NUMA properties of the machine.
17 * ------------------- ------------------- ------------
18 * | u0 | u1 | u2 | u3 | | u0 | u1 | u2 | u3 | | u0 | u1 | u
19 * ------------------- ...... ------------------- .... ------------
21 * Allocation is done by offsets into a unit's address space. I.e., an
22 * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
23 * c1:u1, c1:u2, etc. On NUMA machines, the mapping may be non-linear
24 * and even sparse. Access is handled by configuring percpu base
25 * registers according to the cpu to unit mappings and offsetting the
26 * base address using pcpu_unit_size.
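 *
 * Illustrative example (not part of the allocator): a dynamic percpu object
 * is reached on a given cpu by exactly this offset arithmetic. per_cpu_ptr()
 * applies the chosen cpu's unit offset, while the this_cpu_*() accessors use
 * the running cpu's percpu base register:
 *
 *	int __percpu *ctr = alloc_percpu(int);
 *	int *p = per_cpu_ptr(ctr, cpu);	 (base_addr + unit offset + ctr's offset)
 *	free_percpu(ctr);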
28 * There is special consideration for the first chunk which must handle
29 * the static percpu variables in the kernel image as allocation services
30 * are not online yet. In short, the first chunk is structured like so:
32 * <Static | [Reserved] | Dynamic>
34 * The static data is copied from the original section managed by the
35 * linker. The reserved section, if non-zero, primarily manages static
36 * percpu variables from kernel modules. Finally, the dynamic section
37 * takes care of normal allocations.
39 * The allocator organizes chunks into lists according to free size and
40 * memcg-awareness. To make a percpu allocation memcg-aware the __GFP_ACCOUNT
41 * flag should be passed. All memcg-aware allocations share one set of
42 * chunks, while all unaccounted allocations and allocations performed by
43 * processes belonging to the root memory cgroup use the second set.
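 *
 * Illustrative example: a caller opts into the memcg-aware chunk set simply
 * by adding __GFP_ACCOUNT to the gfp mask (struct foo is a placeholder type):
 *
 *	struct foo __percpu *p;
 *
 *	p = alloc_percpu_gfp(struct foo, GFP_KERNEL | __GFP_ACCOUNT);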
45 * The allocator tries to allocate from the fullest chunk first. Each chunk
46 * is managed by a bitmap with metadata blocks. The allocation map is updated
47 * on every allocation and free to reflect the current state while the boundary
48 * map is only updated on allocation. Each metadata block contains
49 * information to help mitigate the need to iterate over large portions
50 * of the bitmap. The reverse mapping from page to chunk is stored in
51 * the page's index. Lastly, units are lazily backed and grow in unison.
53 * There is a unique conversion that goes on here between bytes and bits.
54 * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE. The chunk
55 * tracks the number of pages it is responsible for in nr_pages. Helper
56 * functions are used to convert between bytes, bits, and blocks.
57 * All hints are managed in bits unless explicitly stated.
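 *
 * Worked example of the conversions, assuming the usual PCPU_MIN_ALLOC_SHIFT
 * of 2 and 4 KiB pages (other configurations scale accordingly):
 *
 *	PCPU_MIN_ALLOC_SIZE	= 1 << 2 = 4 bytes per allocation map bit
 *	a 64-byte allocation	= 64 / 4 = 16 bits in the allocation map
 *	one 4 KiB page		= 4096 / 4 = 1024 bits, which is also one
 *				  metadata block when PCPU_BITMAP_BLOCK_SIZE
 *				  equals PAGE_SIZE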
59 * To use this allocator, arch code should do the following:
61 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
62 * regular address to percpu pointer and back if they need to be
63 * different from the default
65 * - use pcpu_setup_first_chunk() during percpu area initialization to
66 * setup the first chunk containing the kernel static percpu area
69 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
71 #include <linux/bitmap.h>
72 #include <linux/cpumask.h>
73 #include <linux/memblock.h>
74 #include <linux/err.h>
75 #include <linux/lcm.h>
76 #include <linux/list.h>
77 #include <linux/log2.h>
79 #include <linux/module.h>
80 #include <linux/mutex.h>
81 #include <linux/percpu.h>
82 #include <linux/pfn.h>
83 #include <linux/slab.h>
84 #include <linux/spinlock.h>
85 #include <linux/vmalloc.h>
86 #include <linux/workqueue.h>
87 #include <linux/kmemleak.h>
88 #include <linux/sched.h>
89 #include <linux/sched/mm.h>
90 #include <linux/memcontrol.h>
92 #include <asm/cacheflush.h>
93 #include <asm/sections.h>
94 #include <asm/tlbflush.h>
97 #define CREATE_TRACE_POINTS
98 #include <trace/events/percpu.h>
100 #include "percpu-internal.h"
103 * The slots are sorted by the size of the biggest continuous free area.
104 * 1-31 bytes share the same slot.
106 #define PCPU_SLOT_BASE_SHIFT 5
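/*
 * Worked example: a chunk whose largest contiguous free area is 1024 bytes
 * has fls(1024) = 11 and therefore sits in slot
 * max(11 - PCPU_SLOT_BASE_SHIFT + 2, 1) = 8 (see __pcpu_size_to_slot()
 * below); a chunk whose whole unit is free goes to the dedicated
 * pcpu_free_slot instead.
 */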
107 /* chunks in slots below this are subject to being sidelined on failed alloc */
108 #define PCPU_SLOT_FAIL_THRESHOLD 3
110 #define PCPU_EMPTY_POP_PAGES_LOW 2
111 #define PCPU_EMPTY_POP_PAGES_HIGH 4
114 /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
115 #ifndef __addr_to_pcpu_ptr
116 #define __addr_to_pcpu_ptr(addr) \
117 (void __percpu *)((unsigned long)(addr) - \
118 (unsigned long)pcpu_base_addr + \
119 (unsigned long)__per_cpu_start)
121 #ifndef __pcpu_ptr_to_addr
122 #define __pcpu_ptr_to_addr(ptr) \
123 (void __force *)((unsigned long)(ptr) + \
124 (unsigned long)pcpu_base_addr - \
125 (unsigned long)__per_cpu_start)
127 #else /* CONFIG_SMP */
128 /* on UP, it's always identity mapped */
129 #define __addr_to_pcpu_ptr(addr) (void __percpu *)(addr)
130 #define __pcpu_ptr_to_addr(ptr) (void __force *)(ptr)
131 #endif /* CONFIG_SMP */
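/*
 * The two mappings above are inverses of each other. In the SMP case:
 *
 *	ptr  = addr - pcpu_base_addr + __per_cpu_start	(__addr_to_pcpu_ptr)
 *	addr = ptr  + pcpu_base_addr - __per_cpu_start	(__pcpu_ptr_to_addr)
 *
 * so __pcpu_ptr_to_addr(__addr_to_pcpu_ptr(addr)) == addr. Biasing by
 * __per_cpu_start makes a pointer to a dynamic allocation look like a
 * pointer into the static percpu section, so both kinds of percpu pointers
 * can share the same accessors.
 */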
133 static int pcpu_unit_pages __ro_after_init;
134 static int pcpu_unit_size __ro_after_init;
135 static int pcpu_nr_units __ro_after_init;
136 static int pcpu_atom_size __ro_after_init;
137 int pcpu_nr_slots __ro_after_init;
138 static int pcpu_free_slot __ro_after_init;
139 int pcpu_sidelined_slot __ro_after_init;
140 int pcpu_to_depopulate_slot __ro_after_init;
141 static size_t pcpu_chunk_struct_size __ro_after_init;
143 /* cpus with the lowest and highest unit addresses */
144 static unsigned int pcpu_low_unit_cpu __ro_after_init;
145 static unsigned int pcpu_high_unit_cpu __ro_after_init;
147 /* the address of the first chunk which starts with the kernel static area */
148 void *pcpu_base_addr __ro_after_init;
149 EXPORT_SYMBOL_GPL(pcpu_base_addr);
151 static const int *pcpu_unit_map __ro_after_init; /* cpu -> unit */
152 const unsigned long *pcpu_unit_offsets __ro_after_init; /* cpu -> unit offset */
154 /* group information, used for vm allocation */
155 static int pcpu_nr_groups __ro_after_init;
156 static const unsigned long *pcpu_group_offsets __ro_after_init;
157 static const size_t *pcpu_group_sizes __ro_after_init;
160 * The first chunk which always exists. Note that unlike other
161 * chunks, this one can be allocated and mapped in several different
162 * ways and thus often doesn't live in the vmalloc area.
164 struct pcpu_chunk *pcpu_first_chunk __ro_after_init;
167 * Optional reserved chunk. This chunk reserves part of the first
168 * chunk and serves it for reserved allocations. When the reserved
169 * region doesn't exist, the following variable is NULL.
171 struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;
173 DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */
174 static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */
176 struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */
178 /* chunks which need their map areas extended, protected by pcpu_lock */
179 static LIST_HEAD(pcpu_map_extend_chunks);
182 * The number of empty populated pages, protected by pcpu_lock.
183 * The reserved chunk doesn't contribute to the count.
185 int pcpu_nr_empty_pop_pages;
188 * The number of populated pages in use by the allocator, protected by
189 * pcpu_lock. This number is kept per unit per chunk (i.e. when a page gets
190 * allocated/deallocated, it is allocated/deallocated in all units of a chunk
191 * and increments/decrements this count by 1).
193 static unsigned long pcpu_nr_populated;
196 * Balance work is used to populate or destroy chunks asynchronously. We
197 * try to keep the number of populated free pages between
198 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one empty chunk.
201 static void pcpu_balance_workfn(struct work_struct *work);
202 static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
203 static bool pcpu_async_enabled __read_mostly;
204 static bool pcpu_atomic_alloc_failed;
206 static void pcpu_schedule_balance_work(void)
208 if (pcpu_async_enabled)
209 schedule_work(&pcpu_balance_work);
213 * pcpu_addr_in_chunk - check if the address is served from this chunk
214 * @chunk: chunk of interest
215 * @addr: percpu address
218 * True if the address is served from this chunk.
220 static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
222 void *start_addr, *end_addr;
227 start_addr = chunk->base_addr + chunk->start_offset;
228 end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
231 return addr >= start_addr && addr < end_addr;
234 static int __pcpu_size_to_slot(int size)
236 int highbit = fls(size); /* size is in bytes */
237 return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
240 static int pcpu_size_to_slot(int size)
242 if (size == pcpu_unit_size)
243 return pcpu_free_slot;
244 return __pcpu_size_to_slot(size);
247 static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
249 const struct pcpu_block_md *chunk_md = &chunk->chunk_md;
251 if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE ||
252 chunk_md->contig_hint == 0)
255 return pcpu_size_to_slot(chunk_md->contig_hint * PCPU_MIN_ALLOC_SIZE);
258 /* set the pointer to a chunk in a page struct */
259 static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
261 page->index = (unsigned long)pcpu;
264 /* obtain pointer to a chunk from a page struct */
265 static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
267 return (struct pcpu_chunk *)page->index;
270 static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
272 return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
275 static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
277 return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
280 static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
281 unsigned int cpu, int page_idx)
283 return (unsigned long)chunk->base_addr +
284 pcpu_unit_page_offset(cpu, page_idx);
288 * The following are helper functions to help access bitmaps and convert
289 * between bitmap offsets and address offsets.
291 static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index)
293 return chunk->alloc_map +
294 (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG);
297 static unsigned long pcpu_off_to_block_index(int off)
299 return off / PCPU_BITMAP_BLOCK_BITS;
302 static unsigned long pcpu_off_to_block_off(int off)
304 return off & (PCPU_BITMAP_BLOCK_BITS - 1);
307 static unsigned long pcpu_block_off_to_off(int index, int off)
309 return index * PCPU_BITMAP_BLOCK_BITS + off;
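/*
 * Worked example for the converters above, assuming PCPU_BITMAP_BLOCK_BITS
 * of 1024 (4 KiB pages):
 *
 *	bit offset 2500	-> block index  2500 / 1024 = 2
 *			   in-block off 2500 & 1023 = 452
 *	and back	-> 2 * 1024 + 452           = 2500
 */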
313 * pcpu_check_block_hint - check against the contig hint
314 * @block: block of interest
315 * @bits: size of allocation
316 * @align: alignment of area (max PAGE_SIZE)
318 * Check to see if the allocation can fit in the block's contig hint.
319 * Note, a chunk uses the same hints as a block so this can also check against
320 * the chunk's contig hint.
322 static bool pcpu_check_block_hint(struct pcpu_block_md *block, int bits,
325 int bit_off = ALIGN(block->contig_hint_start, align) -
326 block->contig_hint_start;
328 return bit_off + bits <= block->contig_hint;
332 * pcpu_next_hint - determine which hint to use
333 * @block: block of interest
334 * @alloc_bits: size of allocation
336 * This determines if we should scan based on the scan_hint or first_free.
337 * In general, we want to scan from first_free to fulfill allocations by
338 * first fit. However, if we know a scan_hint at position scan_hint_start
339 * cannot fulfill an allocation, we can begin scanning from there knowing
340 * the contig_hint will be our fallback.
342 static int pcpu_next_hint(struct pcpu_block_md *block, int alloc_bits)
345 * The three conditions below determine if we can skip past the
346 * scan_hint. First, does the scan hint exist. Second, is the
347 * contig_hint after the scan_hint (possibly not true iff
348 * contig_hint == scan_hint). Third, is the allocation request
349 * larger than the scan_hint.
351 if (block->scan_hint &&
352 block->contig_hint_start > block->scan_hint_start &&
353 alloc_bits > block->scan_hint)
354 return block->scan_hint_start + block->scan_hint;
356 return block->first_free;
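/*
 * Illustrative scenario: a block with first_free = 10, a 4-bit scan_hint at
 * offset 96 and a contig_hint starting at 200. A request for 16 bits cannot
 * fit in the scan_hint, so scanning starts at 96 + 4 = 100 with the
 * contig_hint as fallback; a request for 3 bits starts at first_free = 10
 * as usual.
 */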
360 * pcpu_next_md_free_region - finds the next hint free area
361 * @chunk: chunk of interest
362 * @bit_off: chunk offset
363 * @bits: size of free area
365 * Helper function for pcpu_for_each_md_free_region. It checks
366 * block->contig_hint and performs aggregation across blocks to find the
367 * next hint. It modifies bit_off and bits in-place to be consumed in the next iteration.
370 static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
373 int i = pcpu_off_to_block_index(*bit_off);
374 int block_off = pcpu_off_to_block_off(*bit_off);
375 struct pcpu_block_md *block;
378 for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
380 /* handles contig area across blocks */
382 *bits += block->left_free;
383 if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
389 * This checks three things. First is there a contig_hint to
390 * check. Second, have we checked this hint before by
391 * comparing the block_off. Third, is this the same as the
392 * right contig hint. In the last case, it spills over into
393 * the next block and should be handled by the contig area
394 * across blocks code.
396 *bits = block->contig_hint;
397 if (*bits && block->contig_hint_start >= block_off &&
398 *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) {
399 *bit_off = pcpu_block_off_to_off(i,
400 block->contig_hint_start);
403 /* reset to satisfy the second predicate above */
406 *bits = block->right_free;
407 *bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
412 * pcpu_next_fit_region - finds fit areas for a given allocation request
413 * @chunk: chunk of interest
414 * @alloc_bits: size of allocation
415 * @align: alignment of area (max PAGE_SIZE)
416 * @bit_off: chunk offset
417 * @bits: size of free area
419 * Finds the next free region that is viable for use with a given size and
420 * alignment. This only returns if there is a valid area to be used for this
421 * allocation. block->first_free is returned if the allocation request fits
422 * within the block to see if the request can be fulfilled prior to the contig hint.
425 static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
426 int align, int *bit_off, int *bits)
428 int i = pcpu_off_to_block_index(*bit_off);
429 int block_off = pcpu_off_to_block_off(*bit_off);
430 struct pcpu_block_md *block;
433 for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
435 /* handles contig area across blocks */
437 *bits += block->left_free;
438 if (*bits >= alloc_bits)
440 if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
444 /* check block->contig_hint */
445 *bits = ALIGN(block->contig_hint_start, align) -
446 block->contig_hint_start;
448 * This uses the block offset to determine if this has been
449 * checked in the prior iteration.
451 if (block->contig_hint &&
452 block->contig_hint_start >= block_off &&
453 block->contig_hint >= *bits + alloc_bits) {
454 int start = pcpu_next_hint(block, alloc_bits);
456 *bits += alloc_bits + block->contig_hint_start -
458 *bit_off = pcpu_block_off_to_off(i, start);
461 /* reset to satisfy the second predicate above */
464 *bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
466 *bits = PCPU_BITMAP_BLOCK_BITS - *bit_off;
467 *bit_off = pcpu_block_off_to_off(i, *bit_off);
468 if (*bits >= alloc_bits)
472 /* no valid offsets were found - fail condition */
473 *bit_off = pcpu_chunk_map_bits(chunk);
477 * Metadata free area iterators. These perform aggregation of free areas
478 * based on the metadata blocks and return the offset @bit_off and size in
479 * bits of the free area @bits. pcpu_for_each_fit_region only returns when
480 * a fit is found for the allocation request.
482 #define pcpu_for_each_md_free_region(chunk, bit_off, bits) \
483 for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits)); \
484 (bit_off) < pcpu_chunk_map_bits((chunk)); \
485 (bit_off) += (bits) + 1, \
486 pcpu_next_md_free_region((chunk), &(bit_off), &(bits)))
488 #define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) \
489 for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
491 (bit_off) < pcpu_chunk_map_bits((chunk)); \
492 (bit_off) += (bits), \
493 pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
497 * pcpu_mem_zalloc - allocate memory
498 * @size: bytes to allocate
499 * @gfp: allocation flags
501 * Allocate @size bytes. If @size is smaller than PAGE_SIZE,
502 * kzalloc() is used; otherwise, the equivalent of vzalloc() is used.
503 * This is to facilitate passing through whitelisted flags. The
504 * returned memory is always zeroed.
507 * Pointer to the allocated area on success, NULL on failure.
509 static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
511 if (WARN_ON_ONCE(!slab_is_available()))
514 if (size <= PAGE_SIZE)
515 return kzalloc(size, gfp);
517 return __vmalloc(size, gfp | __GFP_ZERO);
521 * pcpu_mem_free - free memory
522 * @ptr: memory to free
524 * Free @ptr. @ptr should have been allocated using pcpu_mem_zalloc().
526 static void pcpu_mem_free(void *ptr)
531 static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot,
534 if (chunk != pcpu_reserved_chunk) {
536 list_move(&chunk->list, &pcpu_chunk_lists[slot]);
538 list_move_tail(&chunk->list, &pcpu_chunk_lists[slot]);
542 static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot)
544 __pcpu_chunk_move(chunk, slot, true);
548 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
549 * @chunk: chunk of interest
550 * @oslot: the previous slot it was on
552 * This function is called after an allocation or free changed @chunk.
553 * New slot according to the changed state is determined and @chunk is
554 * moved to the slot. Note that the reserved chunk is never put on chunk slots.
560 static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
562 int nslot = pcpu_chunk_slot(chunk);
564 /* leave isolated chunks in-place */
569 __pcpu_chunk_move(chunk, nslot, oslot < nslot);
572 static void pcpu_isolate_chunk(struct pcpu_chunk *chunk)
574 lockdep_assert_held(&pcpu_lock);
576 if (!chunk->isolated) {
577 chunk->isolated = true;
578 pcpu_nr_empty_pop_pages -= chunk->nr_empty_pop_pages;
580 list_move(&chunk->list, &pcpu_chunk_lists[pcpu_to_depopulate_slot]);
583 static void pcpu_reintegrate_chunk(struct pcpu_chunk *chunk)
585 lockdep_assert_held(&pcpu_lock);
587 if (chunk->isolated) {
588 chunk->isolated = false;
589 pcpu_nr_empty_pop_pages += chunk->nr_empty_pop_pages;
590 pcpu_chunk_relocate(chunk, -1);
595 * pcpu_update_empty_pages - update empty page counters
596 * @chunk: chunk of interest
597 * @nr: nr of empty pages
599 * This is used to keep track of the empty pages now based on the premise
600 * a md_block covers a page. The hint update functions recognize if a block
601 * is made full or broken to calculate deltas for keeping track of free pages.
603 static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)
605 chunk->nr_empty_pop_pages += nr;
606 if (chunk != pcpu_reserved_chunk && !chunk->isolated)
607 pcpu_nr_empty_pop_pages += nr;
611 * pcpu_region_overlap - determines if two regions overlap
612 * @a: start of first region, inclusive
613 * @b: end of first region, exclusive
614 * @x: start of second region, inclusive
615 * @y: end of second region, exclusive
617 * This is used to determine if the hint region [a, b) overlaps with the
618 * allocated region [x, y).
620 static inline bool pcpu_region_overlap(int a, int b, int x, int y)
622 return (a < y) && (x < b);
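/*
 * For example, pcpu_region_overlap(10, 20, 15, 30) is true because
 * 10 < 30 && 15 < 20: the hint [10, 20) and the allocation [15, 30) share
 * the bits [15, 20).
 */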
626 * pcpu_block_update - updates a block given a free area
627 * @block: block of interest
628 * @start: start offset in block
629 * @end: end offset in block
631 * Updates a block given a known free area. The region [start, end) is
632 * expected to be the entirety of the free area within a block. Chooses
633 * the best starting offset if the contig hints are equal.
635 static void pcpu_block_update(struct pcpu_block_md *block, int start, int end)
637 int contig = end - start;
639 block->first_free = min(block->first_free, start);
641 block->left_free = contig;
643 if (end == block->nr_bits)
644 block->right_free = contig;
646 if (contig > block->contig_hint) {
647 /* promote the old contig_hint to be the new scan_hint */
648 if (start > block->contig_hint_start) {
649 if (block->contig_hint > block->scan_hint) {
650 block->scan_hint_start =
651 block->contig_hint_start;
652 block->scan_hint = block->contig_hint;
653 } else if (start < block->scan_hint_start) {
655 * The old contig_hint == scan_hint. But, the
656 * new contig is larger so hold the invariant
657 * scan_hint_start < contig_hint_start.
659 block->scan_hint = 0;
662 block->scan_hint = 0;
664 block->contig_hint_start = start;
665 block->contig_hint = contig;
666 } else if (contig == block->contig_hint) {
667 if (block->contig_hint_start &&
669 __ffs(start) > __ffs(block->contig_hint_start))) {
670 /* start has a better alignment so use it */
671 block->contig_hint_start = start;
672 if (start < block->scan_hint_start &&
673 block->contig_hint > block->scan_hint)
674 block->scan_hint = 0;
675 } else if (start > block->scan_hint_start ||
676 block->contig_hint > block->scan_hint) {
678 * Knowing contig == contig_hint, update the scan_hint
679 * if it is farther than or larger than the current scan_hint.
682 block->scan_hint_start = start;
683 block->scan_hint = contig;
687 * The region is smaller than the contig_hint. So only update
688 * the scan_hint if it is larger than or equal and farther than
689 * the current scan_hint.
691 if ((start < block->contig_hint_start &&
692 (contig > block->scan_hint ||
693 (contig == block->scan_hint &&
694 start > block->scan_hint_start)))) {
695 block->scan_hint_start = start;
696 block->scan_hint = contig;
702 * pcpu_block_update_scan - update a block given a free area from a scan
703 * @chunk: chunk of interest
704 * @bit_off: chunk offset
705 * @bits: size of free area
707 * Finding the final allocation spot first goes through pcpu_find_block_fit()
708 * to find a block that can hold the allocation and then pcpu_alloc_area()
709 * where a scan is used. When allocations require specific alignments,
710 * we can inadvertently create holes which will not be seen in the alloc or free paths.
713 * This takes a given free area hole and updates a block as it may change the
714 * scan_hint. We need to scan backwards to ensure we don't miss free bits from alignment.
717 static void pcpu_block_update_scan(struct pcpu_chunk *chunk, int bit_off,
720 int s_off = pcpu_off_to_block_off(bit_off);
721 int e_off = s_off + bits;
723 struct pcpu_block_md *block;
725 if (e_off > PCPU_BITMAP_BLOCK_BITS)
728 s_index = pcpu_off_to_block_index(bit_off);
729 block = chunk->md_blocks + s_index;
731 /* scan backwards in case of alignment skipping free bits */
732 l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), s_off);
733 s_off = (s_off == l_bit) ? 0 : l_bit + 1;
735 pcpu_block_update(block, s_off, e_off);
739 * pcpu_chunk_refresh_hint - updates metadata about a chunk
740 * @chunk: chunk of interest
741 * @full_scan: if we should scan from the beginning
743 * Iterates over the metadata blocks to find the largest contig area.
744 * A full scan can be avoided on the allocation path as this is triggered
745 * if we broke the contig_hint. In doing so, the scan_hint will be before
746 * the contig_hint or after if the scan_hint == contig_hint. This cannot
747 * be prevented on freeing as we want to find the largest area possibly spanning blocks.
750 static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan)
752 struct pcpu_block_md *chunk_md = &chunk->chunk_md;
755 /* promote scan_hint to contig_hint */
756 if (!full_scan && chunk_md->scan_hint) {
757 bit_off = chunk_md->scan_hint_start + chunk_md->scan_hint;
758 chunk_md->contig_hint_start = chunk_md->scan_hint_start;
759 chunk_md->contig_hint = chunk_md->scan_hint;
760 chunk_md->scan_hint = 0;
762 bit_off = chunk_md->first_free;
763 chunk_md->contig_hint = 0;
767 pcpu_for_each_md_free_region(chunk, bit_off, bits)
768 pcpu_block_update(chunk_md, bit_off, bit_off + bits);
772 * pcpu_block_refresh_hint
773 * @chunk: chunk of interest
774 * @index: index of the metadata block
776 * Scans over the block beginning at first_free and updates the block
777 * metadata accordingly.
779 static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
781 struct pcpu_block_md *block = chunk->md_blocks + index;
782 unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
783 unsigned int rs, re, start; /* region start, region end */
785 /* promote scan_hint to contig_hint */
786 if (block->scan_hint) {
787 start = block->scan_hint_start + block->scan_hint;
788 block->contig_hint_start = block->scan_hint_start;
789 block->contig_hint = block->scan_hint;
790 block->scan_hint = 0;
792 start = block->first_free;
793 block->contig_hint = 0;
796 block->right_free = 0;
798 /* iterate over free areas and update the contig hints */
799 bitmap_for_each_clear_region(alloc_map, rs, re, start,
800 PCPU_BITMAP_BLOCK_BITS)
801 pcpu_block_update(block, rs, re);
805 * pcpu_block_update_hint_alloc - update hint on allocation path
806 * @chunk: chunk of interest
807 * @bit_off: chunk offset
808 * @bits: size of request
810 * Updates metadata for the allocation path. The metadata only has to be
811 * refreshed by a full scan iff the chunk's contig hint is broken. Block level
812 * scans are required if the block's contig hint is broken.
814 static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
817 struct pcpu_block_md *chunk_md = &chunk->chunk_md;
818 int nr_empty_pages = 0;
819 struct pcpu_block_md *s_block, *e_block, *block;
820 int s_index, e_index; /* block indexes of the allocation */
821 int s_off, e_off; /* block offsets of the allocation */
824 * Calculate per block offsets.
825 * The calculation uses an inclusive range, but the resulting offsets
826 * are [start, end). e_index always points to the last block in the range.
829 s_index = pcpu_off_to_block_index(bit_off);
830 e_index = pcpu_off_to_block_index(bit_off + bits - 1);
831 s_off = pcpu_off_to_block_off(bit_off);
832 e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;
834 s_block = chunk->md_blocks + s_index;
835 e_block = chunk->md_blocks + e_index;
839 * block->first_free must be updated if the allocation takes its place.
840 * If the allocation breaks the contig_hint, a scan is required to restore this hint.
843 if (s_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
846 if (s_off == s_block->first_free)
847 s_block->first_free = find_next_zero_bit(
848 pcpu_index_alloc_map(chunk, s_index),
849 PCPU_BITMAP_BLOCK_BITS,
852 if (pcpu_region_overlap(s_block->scan_hint_start,
853 s_block->scan_hint_start + s_block->scan_hint,
856 s_block->scan_hint = 0;
858 if (pcpu_region_overlap(s_block->contig_hint_start,
859 s_block->contig_hint_start +
860 s_block->contig_hint,
863 /* block contig hint is broken - scan to fix it */
865 s_block->left_free = 0;
866 pcpu_block_refresh_hint(chunk, s_index);
868 /* update left and right contig manually */
869 s_block->left_free = min(s_block->left_free, s_off);
870 if (s_index == e_index)
871 s_block->right_free = min_t(int, s_block->right_free,
872 PCPU_BITMAP_BLOCK_BITS - e_off);
874 s_block->right_free = 0;
880 if (s_index != e_index) {
881 if (e_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
885 * When the allocation is across blocks, the end is along
886 * the left part of the e_block.
888 e_block->first_free = find_next_zero_bit(
889 pcpu_index_alloc_map(chunk, e_index),
890 PCPU_BITMAP_BLOCK_BITS, e_off);
892 if (e_off == PCPU_BITMAP_BLOCK_BITS) {
893 /* reset the block */
896 if (e_off > e_block->scan_hint_start)
897 e_block->scan_hint = 0;
899 e_block->left_free = 0;
900 if (e_off > e_block->contig_hint_start) {
901 /* contig hint is broken - scan to fix it */
902 pcpu_block_refresh_hint(chunk, e_index);
904 e_block->right_free =
905 min_t(int, e_block->right_free,
906 PCPU_BITMAP_BLOCK_BITS - e_off);
910 /* update in-between md_blocks */
911 nr_empty_pages += (e_index - s_index - 1);
912 for (block = s_block + 1; block < e_block; block++) {
913 block->scan_hint = 0;
914 block->contig_hint = 0;
915 block->left_free = 0;
916 block->right_free = 0;
921 pcpu_update_empty_pages(chunk, -nr_empty_pages);
923 if (pcpu_region_overlap(chunk_md->scan_hint_start,
924 chunk_md->scan_hint_start +
928 chunk_md->scan_hint = 0;
931 * The only time a full chunk scan is required is if the chunk
932 * contig hint is broken. Otherwise, it means a smaller space
933 * was used and therefore the chunk contig hint is still correct.
935 if (pcpu_region_overlap(chunk_md->contig_hint_start,
936 chunk_md->contig_hint_start +
937 chunk_md->contig_hint,
940 pcpu_chunk_refresh_hint(chunk, false);
944 * pcpu_block_update_hint_free - updates the block hints on the free path
945 * @chunk: chunk of interest
946 * @bit_off: chunk offset
947 * @bits: size of request
949 * Updates metadata for the free path. This avoids a blind block
950 * refresh by making use of the block contig hints. If this fails, it scans
951 * forward and backward to determine the extent of the free area. This is
952 * capped at the boundary of blocks.
954 * A chunk update is triggered if a page becomes free, a block becomes free,
955 * or the free spans across blocks. This tradeoff is to minimize iterating
956 * over the block metadata to update chunk_md->contig_hint.
957 * chunk_md->contig_hint may be off by up to a page, but it will never be more
958 * than the available space. If the contig hint is contained in one block, it will be accurate.
961 static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
964 int nr_empty_pages = 0;
965 struct pcpu_block_md *s_block, *e_block, *block;
966 int s_index, e_index; /* block indexes of the freed allocation */
967 int s_off, e_off; /* block offsets of the freed allocation */
968 int start, end; /* start and end of the whole free area */
971 * Calculate per block offsets.
972 * The calculation uses an inclusive range, but the resulting offsets
973 * are [start, end). e_index always points to the last block in the range.
976 s_index = pcpu_off_to_block_index(bit_off);
977 e_index = pcpu_off_to_block_index(bit_off + bits - 1);
978 s_off = pcpu_off_to_block_off(bit_off);
979 e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;
981 s_block = chunk->md_blocks + s_index;
982 e_block = chunk->md_blocks + e_index;
985 * Check if the freed area aligns with the block->contig_hint.
986 * If it does, then the scan to find the beginning/end of the
987 * larger free area can be avoided.
989 * start and end refer to beginning and end of the free area
990 * within their respective blocks. This is not necessarily
991 * the entire free area as it may span blocks past the beginning
992 * or end of the block.
995 if (s_off == s_block->contig_hint + s_block->contig_hint_start) {
996 start = s_block->contig_hint_start;
999 * Scan backwards to find the extent of the free area.
1000 * find_last_bit returns the starting bit, so if the start bit
1001 * is returned, that means there was no last bit and the
1002 * remainder of the chunk is free.
1004 int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index),
1006 start = (start == l_bit) ? 0 : l_bit + 1;
1010 if (e_off == e_block->contig_hint_start)
1011 end = e_block->contig_hint_start + e_block->contig_hint;
1013 end = find_next_bit(pcpu_index_alloc_map(chunk, e_index),
1014 PCPU_BITMAP_BLOCK_BITS, end);
1016 /* update s_block */
1017 e_off = (s_index == e_index) ? end : PCPU_BITMAP_BLOCK_BITS;
1018 if (!start && e_off == PCPU_BITMAP_BLOCK_BITS)
1020 pcpu_block_update(s_block, start, e_off);
1022 /* freeing in the same block */
1023 if (s_index != e_index) {
1024 /* update e_block */
1025 if (end == PCPU_BITMAP_BLOCK_BITS)
1027 pcpu_block_update(e_block, 0, end);
1029 /* reset md_blocks in the middle */
1030 nr_empty_pages += (e_index - s_index - 1);
1031 for (block = s_block + 1; block < e_block; block++) {
1032 block->first_free = 0;
1033 block->scan_hint = 0;
1034 block->contig_hint_start = 0;
1035 block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
1036 block->left_free = PCPU_BITMAP_BLOCK_BITS;
1037 block->right_free = PCPU_BITMAP_BLOCK_BITS;
1042 pcpu_update_empty_pages(chunk, nr_empty_pages);
1045 * Refresh chunk metadata when the free makes a block free or spans
1046 * across blocks. The contig_hint may be off by up to a page, but if
1047 * the contig_hint is contained in a block, it will be accurate with
1048 * the else condition below.
1050 if (((end - start) >= PCPU_BITMAP_BLOCK_BITS) || s_index != e_index)
1051 pcpu_chunk_refresh_hint(chunk, true);
1053 pcpu_block_update(&chunk->chunk_md,
1054 pcpu_block_off_to_off(s_index, start),
1059 * pcpu_is_populated - determines if the region is populated
1060 * @chunk: chunk of interest
1061 * @bit_off: chunk offset
1062 * @bits: size of area
1063 * @next_off: return value for the next offset to start searching
1065 * For atomic allocations, check if the backing pages are populated.
1068 * Bool if the backing pages are populated.
1069 * @next_off is to skip over unpopulated blocks in pcpu_find_block_fit.
1071 static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
1074 unsigned int page_start, page_end, rs, re;
1076 page_start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
1077 page_end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);
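/*
 * Example: bit_off = 1000 and bits = 50 cover bytes [4000, 4200) with a
 * 4-byte PCPU_MIN_ALLOC_SIZE, i.e. pages [0, 2) on a 4 KiB page machine,
 * so both backing pages must already be populated for an atomic alloc.
 */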
1080 bitmap_next_clear_region(chunk->populated, &rs, &re, page_end);
1084 *next_off = re * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
1089 * pcpu_find_block_fit - finds the block index to start searching
1090 * @chunk: chunk of interest
1091 * @alloc_bits: size of request in allocation units
1092 * @align: alignment of area (max PAGE_SIZE bytes)
1093 * @pop_only: use populated regions only
1095 * Given a chunk and an allocation spec, find the offset to begin searching
1096 * for a free region. This iterates over the bitmap metadata blocks to
1097 * find an offset that will be guaranteed to fit the requirements. It is
1098 * not quite first fit as if the allocation does not fit in the contig hint
1099 * of a block or chunk, it is skipped. This errs on the side of caution
1100 * to prevent excess iteration. Poor alignment can cause the allocator to
1101 * skip over blocks and chunks that have valid free areas.
1104 * The offset in the bitmap to begin searching.
1105 * -1 if no offset is found.
1107 static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits,
1108 size_t align, bool pop_only)
1110 struct pcpu_block_md *chunk_md = &chunk->chunk_md;
1111 int bit_off, bits, next_off;
1114 * This is an optimization to prevent scanning by assuming if the
1115 * allocation cannot fit in the global hint, there is memory pressure
1116 * and creating a new chunk would happen soon.
1118 if (!pcpu_check_block_hint(chunk_md, alloc_bits, align))
1121 bit_off = pcpu_next_hint(chunk_md, alloc_bits);
1123 pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) {
1124 if (!pop_only || pcpu_is_populated(chunk, bit_off, bits,
1132 if (bit_off == pcpu_chunk_map_bits(chunk))
1139 * pcpu_find_zero_area - modified from bitmap_find_next_zero_area_off()
1140 * @map: the address to base the search on
1141 * @size: the bitmap size in bits
1142 * @start: the bitnumber to start searching at
1143 * @nr: the number of zeroed bits we're looking for
1144 * @align_mask: alignment mask for zero area
1145 * @largest_off: offset of the largest area skipped
1146 * @largest_bits: size of the largest area skipped
1148 * The @align_mask should be one less than a power of 2.
1150 * This is a modified version of bitmap_find_next_zero_area_off() to remember
1151 * the largest area that was skipped. This is imperfect, but in general is
1152 * good enough. The largest remembered region is the largest failed region
1153 * seen. This does not include anything we possibly skipped due to alignment.
1154 * pcpu_block_update_scan() does scan backwards to try and recover what was
1155 * lost to alignment. While this can cause scanning to miss earlier possible
1156 * free areas, smaller allocations will eventually fill those holes.
1158 static unsigned long pcpu_find_zero_area(unsigned long *map,
1160 unsigned long start,
1162 unsigned long align_mask,
1163 unsigned long *largest_off,
1164 unsigned long *largest_bits)
1166 unsigned long index, end, i, area_off, area_bits;
1168 index = find_next_zero_bit(map, size, start);
1170 /* Align allocation */
1171 index = __ALIGN_MASK(index, align_mask);
1177 i = find_next_bit(map, end, index);
1179 area_bits = i - area_off;
1180 /* remember largest unused area with best alignment */
1181 if (area_bits > *largest_bits ||
1182 (area_bits == *largest_bits && *largest_off &&
1183 (!area_off || __ffs(area_off) > __ffs(*largest_off)))) {
1184 *largest_off = area_off;
1185 *largest_bits = area_bits;
1195 * pcpu_alloc_area - allocates an area from a pcpu_chunk
1196 * @chunk: chunk of interest
1197 * @alloc_bits: size of request in allocation units
1198 * @align: alignment of area (max PAGE_SIZE)
1199 * @start: bit_off to start searching
1201 * This function takes in a @start offset to begin searching to fit an
1202 * allocation of @alloc_bits with alignment @align. It needs to scan
1203 * the allocation map because if it fits within the block's contig hint,
1204 * @start will be block->first_free. This is an attempt to fill the
1205 * allocation prior to breaking the contig hint. The allocation and
1206 * boundary maps are updated accordingly if it confirms a valid free area.
1210 * Allocated addr offset in @chunk on success.
1211 * -1 if no matching area is found.
1213 static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits,
1214 size_t align, int start)
1216 struct pcpu_block_md *chunk_md = &chunk->chunk_md;
1217 size_t align_mask = (align) ? (align - 1) : 0;
1218 unsigned long area_off = 0, area_bits = 0;
1219 int bit_off, end, oslot;
1221 lockdep_assert_held(&pcpu_lock);
1223 oslot = pcpu_chunk_slot(chunk);
1226 * Search to find a fit.
1228 end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS,
1229 pcpu_chunk_map_bits(chunk));
1230 bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits,
1231 align_mask, &area_off, &area_bits);
1236 pcpu_block_update_scan(chunk, area_off, area_bits);
1238 /* update alloc map */
1239 bitmap_set(chunk->alloc_map, bit_off, alloc_bits);
1241 /* update boundary map */
1242 set_bit(bit_off, chunk->bound_map);
1243 bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1);
1244 set_bit(bit_off + alloc_bits, chunk->bound_map);
1246 chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE;
1248 /* update first free bit */
1249 if (bit_off == chunk_md->first_free)
1250 chunk_md->first_free = find_next_zero_bit(
1252 pcpu_chunk_map_bits(chunk),
1253 bit_off + alloc_bits);
1255 pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits);
1257 pcpu_chunk_relocate(chunk, oslot);
1259 return bit_off * PCPU_MIN_ALLOC_SIZE;
1263 * pcpu_free_area - frees the corresponding offset
1264 * @chunk: chunk of interest
1265 * @off: addr offset into chunk
1267 * This function determines the size of an allocation to free using
1268 * the boundary bitmap and clears the allocation map.
1271 * Number of freed bytes.
1273 static int pcpu_free_area(struct pcpu_chunk *chunk, int off)
1275 struct pcpu_block_md *chunk_md = &chunk->chunk_md;
1276 int bit_off, bits, end, oslot, freed;
1278 lockdep_assert_held(&pcpu_lock);
1279 pcpu_stats_area_dealloc(chunk);
1281 oslot = pcpu_chunk_slot(chunk);
1283 bit_off = off / PCPU_MIN_ALLOC_SIZE;
1285 /* find end index */
1286 end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
1288 bits = end - bit_off;
1289 bitmap_clear(chunk->alloc_map, bit_off, bits);
1291 freed = bits * PCPU_MIN_ALLOC_SIZE;
1293 /* update metadata */
1294 chunk->free_bytes += freed;
1296 /* update first free bit */
1297 chunk_md->first_free = min(chunk_md->first_free, bit_off);
1299 pcpu_block_update_hint_free(chunk, bit_off, bits);
1301 pcpu_chunk_relocate(chunk, oslot);
1306 static void pcpu_init_md_block(struct pcpu_block_md *block, int nr_bits)
1308 block->scan_hint = 0;
1309 block->contig_hint = nr_bits;
1310 block->left_free = nr_bits;
1311 block->right_free = nr_bits;
1312 block->first_free = 0;
1313 block->nr_bits = nr_bits;
1316 static void pcpu_init_md_blocks(struct pcpu_chunk *chunk)
1318 struct pcpu_block_md *md_block;
1320 /* init the chunk's block */
1321 pcpu_init_md_block(&chunk->chunk_md, pcpu_chunk_map_bits(chunk));
1323 for (md_block = chunk->md_blocks;
1324 md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk);
1326 pcpu_init_md_block(md_block, PCPU_BITMAP_BLOCK_BITS);
1330 * pcpu_alloc_first_chunk - creates chunks that serve the first chunk
1331 * @tmp_addr: the start of the region served
1332 * @map_size: size of the region served
1334 * This is responsible for creating the chunks that serve the first chunk. The
1335 * base_addr is @tmp_addr rounded down to a page boundary, while the region
1336 * end is rounded up. Offsets are tracked to determine the region served. All
1337 * this is done to appease the bitmap allocator in avoiding partial blocks.
1340 * Chunk serving the region at @tmp_addr of @map_size.
1342 static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
1345 struct pcpu_chunk *chunk;
1346 unsigned long aligned_addr, lcm_align;
1347 int start_offset, offset_bits, region_size, region_bits;
1350 /* region calculations */
1351 aligned_addr = tmp_addr & PAGE_MASK;
1353 start_offset = tmp_addr - aligned_addr;
1356 * Align the end of the region with the LCM of PAGE_SIZE and
1357 * PCPU_BITMAP_BLOCK_SIZE. One of these constants is a multiple of the other.
1360 lcm_align = lcm(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE);
1361 region_size = ALIGN(start_offset + map_size, lcm_align);
1363 /* allocate chunk */
1364 alloc_size = struct_size(chunk, populated,
1365 BITS_TO_LONGS(region_size >> PAGE_SHIFT));
1366 chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1368 panic("%s: Failed to allocate %zu bytes\n", __func__,
1371 INIT_LIST_HEAD(&chunk->list);
1373 chunk->base_addr = (void *)aligned_addr;
1374 chunk->start_offset = start_offset;
1375 chunk->end_offset = region_size - chunk->start_offset - map_size;
1377 chunk->nr_pages = region_size >> PAGE_SHIFT;
1378 region_bits = pcpu_chunk_map_bits(chunk);
1380 alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]);
1381 chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1382 if (!chunk->alloc_map)
1383 panic("%s: Failed to allocate %zu bytes\n", __func__,
1387 BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]);
1388 chunk->bound_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1389 if (!chunk->bound_map)
1390 panic("%s: Failed to allocate %zu bytes\n", __func__,
1393 alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]);
1394 chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1395 if (!chunk->md_blocks)
1396 panic("%s: Failed to allocate %zu bytes\n", __func__,
1399 #ifdef CONFIG_MEMCG_KMEM
1400 /* first chunk is free to use */
1401 chunk->obj_cgroups = NULL;
1403 pcpu_init_md_blocks(chunk);
1405 /* manage populated page bitmap */
1406 chunk->immutable = true;
1407 bitmap_fill(chunk->populated, chunk->nr_pages);
1408 chunk->nr_populated = chunk->nr_pages;
1409 chunk->nr_empty_pop_pages = chunk->nr_pages;
1411 chunk->free_bytes = map_size;
1413 if (chunk->start_offset) {
1414 /* hide the beginning of the bitmap */
1415 offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE;
1416 bitmap_set(chunk->alloc_map, 0, offset_bits);
1417 set_bit(0, chunk->bound_map);
1418 set_bit(offset_bits, chunk->bound_map);
1420 chunk->chunk_md.first_free = offset_bits;
1422 pcpu_block_update_hint_alloc(chunk, 0, offset_bits);
1425 if (chunk->end_offset) {
1426 /* hide the end of the bitmap */
1427 offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE;
1428 bitmap_set(chunk->alloc_map,
1429 pcpu_chunk_map_bits(chunk) - offset_bits,
1431 set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE,
1433 set_bit(region_bits, chunk->bound_map);
1435 pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk)
1436 - offset_bits, offset_bits);
1442 static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp)
1444 struct pcpu_chunk *chunk;
1447 chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp);
1451 INIT_LIST_HEAD(&chunk->list);
1452 chunk->nr_pages = pcpu_unit_pages;
1453 region_bits = pcpu_chunk_map_bits(chunk);
1455 chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) *
1456 sizeof(chunk->alloc_map[0]), gfp);
1457 if (!chunk->alloc_map)
1458 goto alloc_map_fail;
1460 chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) *
1461 sizeof(chunk->bound_map[0]), gfp);
1462 if (!chunk->bound_map)
1463 goto bound_map_fail;
1465 chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) *
1466 sizeof(chunk->md_blocks[0]), gfp);
1467 if (!chunk->md_blocks)
1468 goto md_blocks_fail;
1470 #ifdef CONFIG_MEMCG_KMEM
1471 if (!mem_cgroup_kmem_disabled()) {
1472 chunk->obj_cgroups =
1473 pcpu_mem_zalloc(pcpu_chunk_map_bits(chunk) *
1474 sizeof(struct obj_cgroup *), gfp);
1475 if (!chunk->obj_cgroups)
1480 pcpu_init_md_blocks(chunk);
1483 chunk->free_bytes = chunk->nr_pages * PAGE_SIZE;
1487 #ifdef CONFIG_MEMCG_KMEM
1489 pcpu_mem_free(chunk->md_blocks);
1492 pcpu_mem_free(chunk->bound_map);
1494 pcpu_mem_free(chunk->alloc_map);
1496 pcpu_mem_free(chunk);
1501 static void pcpu_free_chunk(struct pcpu_chunk *chunk)
1505 #ifdef CONFIG_MEMCG_KMEM
1506 pcpu_mem_free(chunk->obj_cgroups);
1508 pcpu_mem_free(chunk->md_blocks);
1509 pcpu_mem_free(chunk->bound_map);
1510 pcpu_mem_free(chunk->alloc_map);
1511 pcpu_mem_free(chunk);
1515 * pcpu_chunk_populated - post-population bookkeeping
1516 * @chunk: pcpu_chunk which got populated
1517 * @page_start: the start page
1518 * @page_end: the end page
1520 * Pages in [@page_start,@page_end) have been populated to @chunk. Update
1521 * the bookkeeping information accordingly. Must be called after each
1522 * successful population.
1524 * If this is @for_alloc, do not increment pcpu_nr_empty_pop_pages because it
1525 * is to serve an allocation in that area.
1527 static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start,
1530 int nr = page_end - page_start;
1532 lockdep_assert_held(&pcpu_lock);
1534 bitmap_set(chunk->populated, page_start, nr);
1535 chunk->nr_populated += nr;
1536 pcpu_nr_populated += nr;
1538 pcpu_update_empty_pages(chunk, nr);
1542 * pcpu_chunk_depopulated - post-depopulation bookkeeping
1543 * @chunk: pcpu_chunk which got depopulated
1544 * @page_start: the start page
1545 * @page_end: the end page
1547 * Pages in [@page_start,@page_end) have been depopulated from @chunk.
1548 * Update the bookkeeping information accordingly. Must be called after
1549 * each successful depopulation.
1551 static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
1552 int page_start, int page_end)
1554 int nr = page_end - page_start;
1556 lockdep_assert_held(&pcpu_lock);
1558 bitmap_clear(chunk->populated, page_start, nr);
1559 chunk->nr_populated -= nr;
1560 pcpu_nr_populated -= nr;
1562 pcpu_update_empty_pages(chunk, -nr);
1566 * Chunk management implementation.
1568 * To allow different implementations, chunk alloc/free and
1569 * [de]population are implemented in a separate file which is pulled
1570 * into this file and compiled together. The following functions
1571 * should be implemented.
1573 * pcpu_populate_chunk - populate the specified range of a chunk
1574 * pcpu_depopulate_chunk - depopulate the specified range of a chunk
1575 * pcpu_create_chunk - create a new chunk
1576 * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop
1577 * pcpu_addr_to_page - translate address to the corresponding struct page
1578 * pcpu_verify_alloc_info - check alloc_info is acceptable during init
1580 static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
1581 int page_start, int page_end, gfp_t gfp);
1582 static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
1583 int page_start, int page_end);
1584 static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp);
1585 static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
1586 static struct page *pcpu_addr_to_page(void *addr);
1587 static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
1589 #ifdef CONFIG_NEED_PER_CPU_KM
1590 #include "percpu-km.c"
1592 #include "percpu-vm.c"
1596 * pcpu_chunk_addr_search - determine chunk containing specified address
1597 * @addr: address for which the chunk needs to be determined.
1599 * This is an internal function that handles all but static allocations.
1600 * Static percpu address values should never be passed into the allocator.
1603 * The address of the found chunk.
1605 static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
1607 /* is it in the dynamic region (first chunk)? */
1608 if (pcpu_addr_in_chunk(pcpu_first_chunk, addr))
1609 return pcpu_first_chunk;
1611 /* is it in the reserved region? */
1612 if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr))
1613 return pcpu_reserved_chunk;
1616 * The address is relative to unit0 which might be unused and
1617 * thus unmapped. Offset the address to the unit space of the
1618 * current processor before looking it up in the vmalloc
1619 * space. Note that any possible cpu id can be used here, so
1620 * there's no need to worry about preemption or cpu hotplug.
1622 addr += pcpu_unit_offsets[raw_smp_processor_id()];
1623 return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
1626 #ifdef CONFIG_MEMCG_KMEM
1627 static bool pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp,
1628 struct obj_cgroup **objcgp)
1630 struct obj_cgroup *objcg;
1632 if (!memcg_kmem_enabled() || !(gfp & __GFP_ACCOUNT))
1635 objcg = get_obj_cgroup_from_current();
1639 if (obj_cgroup_charge(objcg, gfp, size * num_possible_cpus())) {
1640 obj_cgroup_put(objcg);
1648 static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
1649 struct pcpu_chunk *chunk, int off,
1655 if (likely(chunk && chunk->obj_cgroups)) {
1656 chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg;
1659 mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
1660 size * num_possible_cpus());
1663 obj_cgroup_uncharge(objcg, size * num_possible_cpus());
1664 obj_cgroup_put(objcg);
1668 static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
1670 struct obj_cgroup *objcg;
1672 if (unlikely(!chunk->obj_cgroups))
1675 objcg = chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT];
1678 chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = NULL;
1680 obj_cgroup_uncharge(objcg, size * num_possible_cpus());
1683 mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
1684 -(size * num_possible_cpus()));
1687 obj_cgroup_put(objcg);
1690 #else /* CONFIG_MEMCG_KMEM */
1692 pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, struct obj_cgroup **objcgp)
1697 static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
1698 struct pcpu_chunk *chunk, int off,
1703 static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
1706 #endif /* CONFIG_MEMCG_KMEM */
1709 * pcpu_alloc - the percpu allocator
1710 * @size: size of area to allocate in bytes
1711 * @align: alignment of area (max PAGE_SIZE)
1712 * @reserved: allocate from the reserved chunk if available
1713 * @gfp: allocation flags
1715 * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't
1716 * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN
1717 * then no warning will be triggered on invalid or failed allocation requests.
1721 * Percpu pointer to the allocated area on success, NULL on failure.
1723 static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
1729 struct obj_cgroup *objcg = NULL;
1730 static int warn_limit = 10;
1731 struct pcpu_chunk *chunk, *next;
1733 int slot, off, cpu, ret;
1734 unsigned long flags;
1736 size_t bits, bit_align;
1738 gfp = current_gfp_context(gfp);
1739 /* whitelisted flags that can be passed to the backing allocators */
1740 pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
1741 is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
1742 do_warn = !(gfp & __GFP_NOWARN);
1745 * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE,
1746 * therefore alignment must be a minimum of that many bytes.
1747 * An allocation may suffer internal fragmentation of up to
1748 * PCPU_MIN_ALLOC_SIZE - 1 bytes from this rounding up.
1750 if (unlikely(align < PCPU_MIN_ALLOC_SIZE))
1751 align = PCPU_MIN_ALLOC_SIZE;
1753 size = ALIGN(size, PCPU_MIN_ALLOC_SIZE);
1754 bits = size >> PCPU_MIN_ALLOC_SHIFT;
1755 bit_align = align >> PCPU_MIN_ALLOC_SHIFT;
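/*
 * Example: a request for size = 10, align = 8 becomes size = 12
 * (ALIGN(10, 4)), bits = 3 and bit_align = 2, i.e. three 4-byte
 * fragments aligned to an 8-byte boundary, with 2 bytes lost to rounding.
 */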
1757 if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
1758 !is_power_of_2(align))) {
1759 WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n",
1764 if (unlikely(!pcpu_memcg_pre_alloc_hook(size, gfp, &objcg)))
1769 * pcpu_balance_workfn() allocates memory under this mutex,
1770 * and it may wait for memory reclaim. Allow current task
1771 * to become OOM victim, in case of memory pressure.
1773 if (gfp & __GFP_NOFAIL) {
1774 mutex_lock(&pcpu_alloc_mutex);
1775 } else if (mutex_lock_killable(&pcpu_alloc_mutex)) {
1776 pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);
1781 spin_lock_irqsave(&pcpu_lock, flags);
1783 /* serve reserved allocations from the reserved chunk if available */
1784 if (reserved && pcpu_reserved_chunk) {
1785 chunk = pcpu_reserved_chunk;
1787 off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic);
1789 err = "alloc from reserved chunk failed";
1793 off = pcpu_alloc_area(chunk, bits, bit_align, off);
1797 err = "alloc from reserved chunk failed";
1802 /* search through normal chunks */
1803 for (slot = pcpu_size_to_slot(size); slot <= pcpu_free_slot; slot++) {
1804 list_for_each_entry_safe(chunk, next, &pcpu_chunk_lists[slot],
1806 off = pcpu_find_block_fit(chunk, bits, bit_align,
1809 if (slot < PCPU_SLOT_FAIL_THRESHOLD)
1810 pcpu_chunk_move(chunk, 0);
1814 off = pcpu_alloc_area(chunk, bits, bit_align, off);
1816 pcpu_reintegrate_chunk(chunk);
1822 spin_unlock_irqrestore(&pcpu_lock, flags);
1825 * No space left. Create a new chunk. We don't want multiple
1826 * tasks to create chunks simultaneously. Serialize and create iff
1827 * there's still no empty chunk after grabbing the mutex.
1830 err = "atomic alloc failed, no space left";
1834 if (list_empty(&pcpu_chunk_lists[pcpu_free_slot])) {
1835 chunk = pcpu_create_chunk(pcpu_gfp);
1837 err = "failed to allocate new chunk";
1841 spin_lock_irqsave(&pcpu_lock, flags);
1842 pcpu_chunk_relocate(chunk, -1);
1844 spin_lock_irqsave(&pcpu_lock, flags);
1850 pcpu_stats_area_alloc(chunk, size);
1851 spin_unlock_irqrestore(&pcpu_lock, flags);
1853 /* populate if not all pages are already there */
1855 unsigned int page_start, page_end, rs, re;
1857 page_start = PFN_DOWN(off);
1858 page_end = PFN_UP(off + size);
1860 bitmap_for_each_clear_region(chunk->populated, rs, re,
1861 page_start, page_end) {
1862 WARN_ON(chunk->immutable);
1864 ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp);
1866 spin_lock_irqsave(&pcpu_lock, flags);
1868 pcpu_free_area(chunk, off);
1869 err = "failed to populate";
1872 pcpu_chunk_populated(chunk, rs, re);
1873 spin_unlock_irqrestore(&pcpu_lock, flags);
1876 mutex_unlock(&pcpu_alloc_mutex);
1879 if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
1880 pcpu_schedule_balance_work();
1882 /* clear the areas and return address relative to base address */
1883 for_each_possible_cpu(cpu)
1884 memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
1886 ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
1887 kmemleak_alloc_percpu(ptr, size, gfp);
1889 trace_percpu_alloc_percpu(reserved, is_atomic, size, align,
1890 chunk->base_addr, off, ptr);
1892 pcpu_memcg_post_alloc_hook(objcg, chunk, off, size);
1897 spin_unlock_irqrestore(&pcpu_lock, flags);
1899 trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
1901 if (!is_atomic && do_warn && warn_limit) {
1902 pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
1903 size, align, is_atomic, err);
1906 pr_info("limit reached, disable warning\n");
1909 /* see the flag handling in pcpu_balance_workfn() */
1910 pcpu_atomic_alloc_failed = true;
1911 pcpu_schedule_balance_work();
1913 mutex_unlock(&pcpu_alloc_mutex);
1916 pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);
1922 * __alloc_percpu_gfp - allocate dynamic percpu area
1923 * @size: size of area to allocate in bytes
1924 * @align: alignment of area (max PAGE_SIZE)
1925 * @gfp: allocation flags
1927 * Allocate zero-filled percpu area of @size bytes aligned at @align. If
1928 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
1929 * be called from any context but is a lot more likely to fail. If @gfp
1930 * has __GFP_NOWARN then no warning will be triggered on invalid or failed
1931 * allocation requests.
1934 * Percpu pointer to the allocated area on success, NULL on failure.
1936 void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
1938 return pcpu_alloc(size, align, false, gfp);
1940 EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);
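/*
 * Illustrative sketch of caller-side usage, not part of the allocator: when
 * @gfp does not contain GFP_KERNEL the allocation is atomic, only dips into
 * already populated pages and is therefore much more likely to return NULL.
 * The helper below and its name are hypothetical.
 */
static __maybe_unused u64 __percpu *pcpu_demo_alloc_atomic(void)
{
        /* may be called with spinlocks held or IRQs disabled; never sleeps */
        return alloc_percpu_gfp(u64, GFP_NOWAIT | __GFP_NOWARN);
}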
1943 * __alloc_percpu - allocate dynamic percpu area
1944 * @size: size of area to allocate in bytes
1945 * @align: alignment of area (max PAGE_SIZE)
1947 * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
1949 void __percpu *__alloc_percpu(size_t size, size_t align)
1951 return pcpu_alloc(size, align, false, GFP_KERNEL);
1953 EXPORT_SYMBOL_GPL(__alloc_percpu);
1956 * __alloc_reserved_percpu - allocate reserved percpu area
1957 * @size: size of area to allocate in bytes
1958 * @align: alignment of area (max PAGE_SIZE)
1960 * Allocate zero-filled percpu area of @size bytes aligned at @align
1961 * from reserved percpu area if arch has set it up; otherwise,
1962 * allocation is served from the same dynamic area. Might sleep.
1963 * Might trigger writeouts.
1966 * Does GFP_KERNEL allocation.
1969 * Percpu pointer to the allocated area on success, NULL on failure.
1971 void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
1973 return pcpu_alloc(size, align, true, GFP_KERNEL);
1977 * pcpu_balance_free - manage the amount of free chunks
1978 * @empty_only: free chunks only if there are no populated pages
1980 * If empty_only is %false, reclaim all fully free chunks regardless of the
1981 * number of populated pages. Otherwise, only reclaim chunks that have no populated pages.
1985 * pcpu_lock (can be dropped temporarily)
1987 static void pcpu_balance_free(bool empty_only)
1990 struct list_head *free_head = &pcpu_chunk_lists[pcpu_free_slot];
1991 struct pcpu_chunk *chunk, *next;
1993 lockdep_assert_held(&pcpu_lock);
1996 * There's no reason to keep around multiple unused chunks and VM
1997 * areas can be scarce. Destroy all free chunks except for one.
1999 list_for_each_entry_safe(chunk, next, free_head, list) {
2000 WARN_ON(chunk->immutable);
2002 /* spare the first one */
2003 if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
2006 if (!empty_only || chunk->nr_empty_pop_pages == 0)
2007 list_move(&chunk->list, &to_free);
2010 if (list_empty(&to_free))
2013 spin_unlock_irq(&pcpu_lock);
2014 list_for_each_entry_safe(chunk, next, &to_free, list) {
2015 unsigned int rs, re;
2017 bitmap_for_each_set_region(chunk->populated, rs, re, 0,
2019 pcpu_depopulate_chunk(chunk, rs, re);
2020 spin_lock_irq(&pcpu_lock);
2021 pcpu_chunk_depopulated(chunk, rs, re);
2022 spin_unlock_irq(&pcpu_lock);
2024 pcpu_destroy_chunk(chunk);
2027 spin_lock_irq(&pcpu_lock);
2031 * pcpu_balance_populated - manage the amount of populated pages
2033 * Maintain a certain amount of populated pages to satisfy atomic allocations.
2034 * It is possible that this is called when physical memory is scarce causing
2035 * the OOM killer to be triggered. We should avoid doing so until an actual
2036 * allocation causes the failure as it is possible that requests can be
2037 * serviced from already backed regions.
2040 * pcpu_lock (can be dropped temporarily)
2042 static void pcpu_balance_populated(void)
2044 /* gfp flags passed to underlying allocators */
2045 const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
2046 struct pcpu_chunk *chunk;
2047 int slot, nr_to_pop, ret;
2049 lockdep_assert_held(&pcpu_lock);
2052 * Ensure there are a certain number of free populated pages for
2053 * atomic allocs. Fill up from the most packed so that atomic
2054 * allocs don't increase fragmentation. If atomic allocation
2055 * failed previously, always populate the maximum amount. This
2056 * should prevent atomic allocs larger than PAGE_SIZE from keeping
2057 * failing indefinitely; however, large atomic allocs are not
2058 * something we support properly and can be highly unreliable and expensive.
2062 if (pcpu_atomic_alloc_failed) {
2063 nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
2064 /* best effort anyway, don't worry about synchronization */
2065 pcpu_atomic_alloc_failed = false;
2067 nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
2068 pcpu_nr_empty_pop_pages,
2069 0, PCPU_EMPTY_POP_PAGES_HIGH);
2072 for (slot = pcpu_size_to_slot(PAGE_SIZE); slot <= pcpu_free_slot; slot++) {
2073 unsigned int nr_unpop = 0, rs, re;
2078 list_for_each_entry(chunk, &pcpu_chunk_lists[slot], list) {
2079 nr_unpop = chunk->nr_pages - chunk->nr_populated;
2087 /* @chunk can't go away while pcpu_alloc_mutex is held */
2088 bitmap_for_each_clear_region(chunk->populated, rs, re, 0,
2090 int nr = min_t(int, re - rs, nr_to_pop);
2092 spin_unlock_irq(&pcpu_lock);
2093 ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp);
2095 spin_lock_irq(&pcpu_lock);
2098 pcpu_chunk_populated(chunk, rs, rs + nr);
2109 /* ran out of chunks to populate, create a new one and retry */
2110 spin_unlock_irq(&pcpu_lock);
2111 chunk = pcpu_create_chunk(gfp);
2113 spin_lock_irq(&pcpu_lock);
2115 pcpu_chunk_relocate(chunk, -1);
2122 * pcpu_reclaim_populated - scan over to_depopulate chunks and free empty pages
2124 * Scan over chunks in the depopulate list and try to release unused populated
2125 * pages back to the system. Depopulated chunks are sidelined to prevent
2126 * repopulating these pages unless required. Fully free chunks are reintegrated
2127 * and freed accordingly (1 is kept around). If we drop below the empty
2128 * populated pages threshold, reintegrate the chunk if it has empty free pages.
2129 * Each chunk is scanned in the reverse order to keep populated pages close to
2130 * the beginning of the chunk.
2133 * pcpu_lock (can be dropped temporarily)
2136 static void pcpu_reclaim_populated(void)
2138 struct pcpu_chunk *chunk;
2139 struct pcpu_block_md *block;
2142 lockdep_assert_held(&pcpu_lock);
2146 * Once a chunk is isolated to the to_depopulate list, the chunk is no
2147 * longer discoverable to allocations which may populate pages. The only
2148 * other accessor is the free path, which only returns the area back to the
2149 * allocator without touching the populated bitmap.
2151 while (!list_empty(&pcpu_chunk_lists[pcpu_to_depopulate_slot])) {
2152 chunk = list_first_entry(&pcpu_chunk_lists[pcpu_to_depopulate_slot],
2153 struct pcpu_chunk, list);
2154 WARN_ON(chunk->immutable);
2157 * Scan chunk's pages in the reverse order to keep populated
2158 * pages close to the beginning of the chunk.
2160 for (i = chunk->nr_pages - 1, end = -1; i >= 0; i--) {
2161 /* no more work to do */
2162 if (chunk->nr_empty_pop_pages == 0)
2165 /* reintegrate chunk to prevent atomic alloc failures */
2166 if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_HIGH) {
2167 pcpu_reintegrate_chunk(chunk);
2172 * If the page is empty and populated, start or
2173 * extend the (i, end) range. If i == 0, decrease
2174 * i and perform the depopulation to cover the last
2175 * (first) page in the chunk.
2177 block = chunk->md_blocks + i;
2178 if (block->contig_hint == PCPU_BITMAP_BLOCK_BITS &&
2179 test_bit(i, chunk->populated)) {
2187 /* depopulate if there is an active range */
2191 spin_unlock_irq(&pcpu_lock);
2192 pcpu_depopulate_chunk(chunk, i + 1, end + 1);
2194 spin_lock_irq(&pcpu_lock);
2196 pcpu_chunk_depopulated(chunk, i + 1, end + 1);
2198 /* reset the range and continue */
2202 if (chunk->free_bytes == pcpu_unit_size)
2203 pcpu_reintegrate_chunk(chunk);
2205 list_move(&chunk->list,
2206 &pcpu_chunk_lists[pcpu_sidelined_slot]);
2211 * pcpu_balance_workfn - manage the amount of free chunks and populated pages
2214 * For each chunk type, manage the number of fully free chunks and the number of
2215 * populated pages. An important thing to consider is when pages are freed and
2216 * how they contribute to the global counts.
2218 static void pcpu_balance_workfn(struct work_struct *work)
2221 * pcpu_balance_free() is called twice because the first time we may
2222 * trim pages in the active pcpu_nr_empty_pop_pages which may cause us
2223 * to grow other chunks. This then gives pcpu_reclaim_populated() time
2224 * to move fully free chunks to the active list to be freed if appropriate.
2227 mutex_lock(&pcpu_alloc_mutex);
2228 spin_lock_irq(&pcpu_lock);
2230 pcpu_balance_free(false);
2231 pcpu_reclaim_populated();
2232 pcpu_balance_populated();
2233 pcpu_balance_free(true);
2235 spin_unlock_irq(&pcpu_lock);
2236 mutex_unlock(&pcpu_alloc_mutex);
2240 * free_percpu - free percpu area
2241 * @ptr: pointer to area to free
2243 * Free percpu area @ptr.
2246 * Can be called from atomic context.
2248 void free_percpu(void __percpu *ptr)
2251 struct pcpu_chunk *chunk;
2252 unsigned long flags;
2254 bool need_balance = false;
2259 kmemleak_free_percpu(ptr);
2261 addr = __pcpu_ptr_to_addr(ptr);
2263 spin_lock_irqsave(&pcpu_lock, flags);
2265 chunk = pcpu_chunk_addr_search(addr);
2266 off = addr - chunk->base_addr;
2268 size = pcpu_free_area(chunk, off);
2270 pcpu_memcg_free_hook(chunk, off, size);
2273 * If there is more than one fully free chunk, wake up the grim reaper.
2274 * If the chunk is isolated, it may be in the process of being
2275 * reclaimed. Let reclaim manage cleaning up of that chunk.
2277 if (!chunk->isolated && chunk->free_bytes == pcpu_unit_size) {
2278 struct pcpu_chunk *pos;
2280 list_for_each_entry(pos, &pcpu_chunk_lists[pcpu_free_slot], list)
2282 need_balance = true;
2285 } else if (pcpu_should_reclaim_chunk(chunk)) {
2286 pcpu_isolate_chunk(chunk);
2287 need_balance = true;
2290 trace_percpu_free_percpu(chunk->base_addr, off, ptr);
2292 spin_unlock_irqrestore(&pcpu_lock, flags);
2295 pcpu_schedule_balance_work();
2297 EXPORT_SYMBOL_GPL(free_percpu);
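/*
 * Illustrative sketch of the typical lifetime of a dynamic percpu object,
 * not part of the allocator: allocate with GFP_KERNEL, update the local
 * CPU's copy with this_cpu ops, fold all copies when reading, then free.
 * The structure and function below are hypothetical.
 */
struct pcpu_demo_stats {
        u64                     events;
};

static __maybe_unused u64 pcpu_demo_lifetime(void)
{
        struct pcpu_demo_stats __percpu *stats;
        u64 sum = 0;
        int cpu;

        stats = alloc_percpu(struct pcpu_demo_stats);   /* zero-filled */
        if (!stats)
                return 0;

        /* safe against preemption and interrupts on the local CPU */
        this_cpu_inc(stats->events);

        /* readers walk every possible CPU's copy */
        for_each_possible_cpu(cpu)
                sum += per_cpu_ptr(stats, cpu)->events;

        /* free_percpu() itself may be called from atomic context */
        free_percpu(stats);
        return sum;
}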
2299 bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
2302 const size_t static_size = __per_cpu_end - __per_cpu_start;
2303 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
2306 for_each_possible_cpu(cpu) {
2307 void *start = per_cpu_ptr(base, cpu);
2308 void *va = (void *)addr;
2310 if (va >= start && va < start + static_size) {
2312 *can_addr = (unsigned long) (va - start);
2313 *can_addr += (unsigned long)
2314 per_cpu_ptr(base, get_boot_cpu_id());
2320 /* on UP, can't distinguish from other static vars, always false */
2325 * is_kernel_percpu_address - test whether address is from static percpu area
2326 * @addr: address to test
2328 * Test whether @addr belongs to the in-kernel static percpu area. Module
2329 * static percpu areas are not considered. For those, use
2330 * is_module_percpu_address().
2333 * %true if @addr is from in-kernel static percpu area, %false otherwise.
2335 bool is_kernel_percpu_address(unsigned long addr)
2337 return __is_kernel_percpu_address(addr, NULL);
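/*
 * Illustrative sketch, not part of the allocator: is_kernel_percpu_address()
 * answers whether an address lies inside some CPU's copy of the in-kernel
 * static percpu area, e.g. the address of this CPU's copy of a static percpu
 * variable. The variable and helper below are hypothetical; module percpu
 * areas and the UP case are not covered and yield false.
 */
static DEFINE_PER_CPU(int, pcpu_demo_var);

static __maybe_unused bool pcpu_demo_is_static_percpu(void)
{
        bool hit;

        hit = is_kernel_percpu_address((unsigned long)get_cpu_ptr(&pcpu_demo_var));
        put_cpu_ptr(&pcpu_demo_var);
        return hit;
}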
2341 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
2342 * @addr: the address to be converted to physical address
2344 * Given @addr, which is a dereferenceable address obtained via one of the
2345 * percpu access macros, this function translates it into its physical
2346 * address. The caller is responsible for ensuring @addr stays valid
2347 * until this function finishes.
2349 * The percpu allocator has special setup for the first chunk, which currently
2350 * supports either embedding in the linear address space or vmalloc mapping,
2351 * and, from the second chunk onwards, the backing allocator (currently either
2352 * vm or km) provides the translation.
2354 * The addr can be translated simply without checking if it falls into the
2355 * first chunk. But the current code better reflects how the percpu allocator
2356 * actually works, and the verification can discover both bugs in the percpu
2357 * allocator itself and in per_cpu_ptr_to_phys() callers. So we keep the current code.
2361 * The physical address for @addr.
2363 phys_addr_t per_cpu_ptr_to_phys(void *addr)
2365 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
2366 bool in_first_chunk = false;
2367 unsigned long first_low, first_high;
2371 * The following test on unit_low/high isn't strictly
2372 * necessary but will speed up lookups of addresses which
2373 * aren't in the first chunk.
2375 * The address check is against full chunk sizes. pcpu_base_addr
2376 * points to the beginning of the first chunk including the
2377 * static region. Assumes good intent as the first chunk may
2378 * not be full (ie. < pcpu_unit_pages in size).
2380 first_low = (unsigned long)pcpu_base_addr +
2381 pcpu_unit_page_offset(pcpu_low_unit_cpu, 0);
2382 first_high = (unsigned long)pcpu_base_addr +
2383 pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages);
2384 if ((unsigned long)addr >= first_low &&
2385 (unsigned long)addr < first_high) {
2386 for_each_possible_cpu(cpu) {
2387 void *start = per_cpu_ptr(base, cpu);
2389 if (addr >= start && addr < start + pcpu_unit_size) {
2390 in_first_chunk = true;
2396 if (in_first_chunk) {
2397 if (!is_vmalloc_addr(addr))
2400 return page_to_phys(vmalloc_to_page(addr)) +
2401 offset_in_page(addr);
2403 return page_to_phys(pcpu_addr_to_page(addr)) +
2404 offset_in_page(addr);
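/*
 * Illustrative sketch, not part of the allocator: translate each CPU's copy
 * of a dynamically allocated percpu object to its physical address, e.g. for
 * debug printing. The helper and its parameter are hypothetical.
 */
static __maybe_unused void pcpu_demo_print_phys(u32 __percpu *val)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                phys_addr_t pa = per_cpu_ptr_to_phys(per_cpu_ptr(val, cpu));

                pr_debug("cpu%d: percpu copy at %pa\n", cpu, &pa);
        }
}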
2408 * pcpu_alloc_alloc_info - allocate percpu allocation info
2409 * @nr_groups: the number of groups
2410 * @nr_units: the number of units
2412 * Allocate ai which is large enough for @nr_groups groups containing
2413 * @nr_units units. The returned ai's groups[0].cpu_map points to the
2414 * cpu_map array which is long enough for @nr_units and filled with
2415 * NR_CPUS. It's the caller's responsibility to initialize the cpu_map
2416 * pointers of the other groups.
2419 * Pointer to the allocated pcpu_alloc_info on success, NULL on failure.
2422 struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
2425 struct pcpu_alloc_info *ai;
2426 size_t base_size, ai_size;
2430 base_size = ALIGN(struct_size(ai, groups, nr_groups),
2431 __alignof__(ai->groups[0].cpu_map[0]));
2432 ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
2434 ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE);
2440 ai->groups[0].cpu_map = ptr;
2442 for (unit = 0; unit < nr_units; unit++)
2443 ai->groups[0].cpu_map[unit] = NR_CPUS;
2445 ai->nr_groups = nr_groups;
2446 ai->__ai_size = PFN_ALIGN(ai_size);
2452 * pcpu_free_alloc_info - free percpu allocation info
2453 * @ai: pcpu_alloc_info to free
2455 * Free @ai which was allocated by pcpu_alloc_alloc_info().
2457 void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
2459 memblock_free_early(__pa(ai), ai->__ai_size);
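/*
 * Illustrative sketch, not part of the allocator: pcpu_alloc_alloc_info()
 * only wires up groups[0].cpu_map, so a caller laying out two groups has to
 * point the second group's cpu_map into the same array itself, the same way
 * pcpu_build_alloc_info() below does. The helper name and group sizes are
 * hypothetical; a real caller would still fill in the sizes and cpu mappings.
 */
static struct pcpu_alloc_info * __init __maybe_unused
pcpu_demo_two_group_ai(int units_in_g0, int units_in_g1)
{
        struct pcpu_alloc_info *ai;

        ai = pcpu_alloc_alloc_info(2, units_in_g0 + units_in_g1);
        if (!ai)
                return NULL;

        /* cpu_map is one long array; the second group starts after the first */
        ai->groups[0].nr_units = units_in_g0;
        ai->groups[1].cpu_map = ai->groups[0].cpu_map + units_in_g0;
        ai->groups[1].nr_units = units_in_g1;

        /* every cpu_map entry is prefilled with NR_CPUS by the allocator */
        return ai;
}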
2463 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
2465 * @ai: allocation info to dump
2467 * Print out information about @ai using loglevel @lvl.
2469 static void pcpu_dump_alloc_info(const char *lvl,
2470 const struct pcpu_alloc_info *ai)
2472 int group_width = 1, cpu_width = 1, width;
2473 char empty_str[] = "--------";
2474 int alloc = 0, alloc_end = 0;
2476 int upa, apl; /* units per alloc, allocs per line */
2482 v = num_possible_cpus();
2485 empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
2487 upa = ai->alloc_size / ai->unit_size;
2488 width = upa * (cpu_width + 1) + group_width + 3;
2489 apl = rounddown_pow_of_two(max(60 / width, 1));
2491 printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
2492 lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
2493 ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
2495 for (group = 0; group < ai->nr_groups; group++) {
2496 const struct pcpu_group_info *gi = &ai->groups[group];
2497 int unit = 0, unit_end = 0;
2499 BUG_ON(gi->nr_units % upa);
2500 for (alloc_end += gi->nr_units / upa;
2501 alloc < alloc_end; alloc++) {
2502 if (!(alloc % apl)) {
2504 printk("%spcpu-alloc: ", lvl);
2506 pr_cont("[%0*d] ", group_width, group);
2508 for (unit_end += upa; unit < unit_end; unit++)
2509 if (gi->cpu_map[unit] != NR_CPUS)
2511 cpu_width, gi->cpu_map[unit]);
2513 pr_cont("%s ", empty_str);
2520 * pcpu_setup_first_chunk - initialize the first percpu chunk
2521 * @ai: pcpu_alloc_info describing how the percpu area is shaped
2522 * @base_addr: mapped address
2524 * Initialize the first percpu chunk which contains the kernel static
2525 * percpu area. This function is to be called from the arch percpu area setup path.
2528 * @ai contains all information necessary to initialize the first
2529 * chunk and prime the dynamic percpu allocator.
2531 * @ai->static_size is the size of static percpu area.
2533 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
2534 * reserve after the static area in the first chunk. This reserves
2535 * the first chunk such that it's available only through reserved
2536 * percpu allocation. This is primarily used to serve module percpu
2537 * static areas on architectures where the addressing model has
2538 * limited offset range for symbol relocations to guarantee module
2539 * percpu symbols fall inside the relocatable range.
2541 * @ai->dyn_size determines the number of bytes available for dynamic
2542 * allocation in the first chunk. The area between @ai->static_size +
2543 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
2545 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
2546 * and equal to or larger than @ai->static_size + @ai->reserved_size + @ai->dyn_size.
2549 * @ai->atom_size is the allocation atom size and is used as the alignment for vm areas.
2552 * @ai->alloc_size is the allocation size and always multiple of
2553 * @ai->atom_size. This is larger than @ai->atom_size if
2554 * @ai->unit_size is larger than @ai->atom_size.
2556 * @ai->nr_groups and @ai->groups describe virtual memory layout of
2557 * percpu areas. Units which should be colocated are put into the
2558 * same group. Dynamic VM areas will be allocated according to these
2559 * groupings. If @ai->nr_groups is zero, a single group containing
2560 * all units is assumed.
2562 * The caller should have mapped the first chunk at @base_addr and
2563 * copied static data to each unit.
2565 * The first chunk will always contain a static and a dynamic region.
2566 * However, the static region is not managed by any chunk. If the first
2567 * chunk also contains a reserved region, it is served by two chunks -
2568 * one for the reserved region and one for the dynamic region. They
2569 * share the same vm, but use offset regions in the area allocation map.
2570 * The chunk serving the dynamic region is circulated in the chunk slots
2571 * and available for dynamic allocation like any other chunk.
2573 void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
2576 size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
2577 size_t static_size, dyn_size;
2578 struct pcpu_chunk *chunk;
2579 unsigned long *group_offsets;
2580 size_t *group_sizes;
2581 unsigned long *unit_off;
2586 unsigned long tmp_addr;
2589 #define PCPU_SETUP_BUG_ON(cond) do { \
2590 if (unlikely(cond)) { \
2591 pr_emerg("failed to initialize, %s\n", #cond); \
2592 pr_emerg("cpu_possible_mask=%*pb\n", \
2593 cpumask_pr_args(cpu_possible_mask)); \
2594 pcpu_dump_alloc_info(KERN_EMERG, ai); \
2600 PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
2602 PCPU_SETUP_BUG_ON(!ai->static_size);
2603 PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
2605 PCPU_SETUP_BUG_ON(!base_addr);
2606 PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
2607 PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
2608 PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
2609 PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
2610 PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE));
2611 PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
2612 PCPU_SETUP_BUG_ON(!ai->dyn_size);
2613 PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE));
2614 PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) ||
2615 IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE)));
2616 PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
2618 /* process group information and build config tables accordingly */
2619 alloc_size = ai->nr_groups * sizeof(group_offsets[0]);
2620 group_offsets = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2622 panic("%s: Failed to allocate %zu bytes\n", __func__,
2625 alloc_size = ai->nr_groups * sizeof(group_sizes[0]);
2626 group_sizes = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2628 panic("%s: Failed to allocate %zu bytes\n", __func__,
2631 alloc_size = nr_cpu_ids * sizeof(unit_map[0]);
2632 unit_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2634 panic("%s: Failed to allocate %zu bytes\n", __func__,
2637 alloc_size = nr_cpu_ids * sizeof(unit_off[0]);
2638 unit_off = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2640 panic("%s: Failed to allocate %zu bytes\n", __func__,
2643 for (cpu = 0; cpu < nr_cpu_ids; cpu++)
2644 unit_map[cpu] = UINT_MAX;
2646 pcpu_low_unit_cpu = NR_CPUS;
2647 pcpu_high_unit_cpu = NR_CPUS;
2649 for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
2650 const struct pcpu_group_info *gi = &ai->groups[group];
2652 group_offsets[group] = gi->base_offset;
2653 group_sizes[group] = gi->nr_units * ai->unit_size;
2655 for (i = 0; i < gi->nr_units; i++) {
2656 cpu = gi->cpu_map[i];
2660 PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
2661 PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
2662 PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
2664 unit_map[cpu] = unit + i;
2665 unit_off[cpu] = gi->base_offset + i * ai->unit_size;
2667 /* determine low/high unit_cpu */
2668 if (pcpu_low_unit_cpu == NR_CPUS ||
2669 unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
2670 pcpu_low_unit_cpu = cpu;
2671 if (pcpu_high_unit_cpu == NR_CPUS ||
2672 unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
2673 pcpu_high_unit_cpu = cpu;
2676 pcpu_nr_units = unit;
2678 for_each_possible_cpu(cpu)
2679 PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
2681 /* we're done parsing the input, undefine BUG macro and dump config */
2682 #undef PCPU_SETUP_BUG_ON
2683 pcpu_dump_alloc_info(KERN_DEBUG, ai);
2685 pcpu_nr_groups = ai->nr_groups;
2686 pcpu_group_offsets = group_offsets;
2687 pcpu_group_sizes = group_sizes;
2688 pcpu_unit_map = unit_map;
2689 pcpu_unit_offsets = unit_off;
2691 /* determine basic parameters */
2692 pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
2693 pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
2694 pcpu_atom_size = ai->atom_size;
2695 pcpu_chunk_struct_size = struct_size(chunk, populated,
2696 BITS_TO_LONGS(pcpu_unit_pages));
2698 pcpu_stats_save_ai(ai);
2701 * Allocate chunk slots. The slots after the active slots are:
2702 * sidelined_slot - isolated, depopulated chunks
2703 * free_slot - fully free chunks
2704 * to_depopulate_slot - isolated, chunks to depopulate
2706 pcpu_sidelined_slot = __pcpu_size_to_slot(pcpu_unit_size) + 1;
2707 pcpu_free_slot = pcpu_sidelined_slot + 1;
2708 pcpu_to_depopulate_slot = pcpu_free_slot + 1;
2709 pcpu_nr_slots = pcpu_to_depopulate_slot + 1;
2710 pcpu_chunk_lists = memblock_alloc(pcpu_nr_slots *
2711 sizeof(pcpu_chunk_lists[0]),
2713 if (!pcpu_chunk_lists)
2714 panic("%s: Failed to allocate %zu bytes\n", __func__,
2715 pcpu_nr_slots * sizeof(pcpu_chunk_lists[0]));
2717 for (i = 0; i < pcpu_nr_slots; i++)
2718 INIT_LIST_HEAD(&pcpu_chunk_lists[i]);
2721 * The end of the static region needs to be aligned with the
2722 * minimum allocation size as this offsets the reserved and
2723 * dynamic region. The first chunk ends page aligned by
2724 * expanding the dynamic region, therefore the dynamic region
2725 * can be shrunk to compensate while still staying above the minimum.
2728 static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE);
2729 dyn_size = ai->dyn_size - (static_size - ai->static_size);
2732 * Initialize first chunk.
2733 * If the reserved_size is non-zero, this initializes the reserved
2734 * chunk. If the reserved_size is zero, the reserved chunk is NULL
2735 * and the dynamic region is initialized here. The first chunk,
2736 * pcpu_first_chunk, will always point to the chunk that serves
2737 * the dynamic region.
2739 tmp_addr = (unsigned long)base_addr + static_size;
2740 map_size = ai->reserved_size ?: dyn_size;
2741 chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
2743 /* init dynamic chunk if necessary */
2744 if (ai->reserved_size) {
2745 pcpu_reserved_chunk = chunk;
2747 tmp_addr = (unsigned long)base_addr + static_size +
2749 map_size = dyn_size;
2750 chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
2753 /* link the first chunk in */
2754 pcpu_first_chunk = chunk;
2755 pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages;
2756 pcpu_chunk_relocate(pcpu_first_chunk, -1);
2758 /* include all regions of the first chunk */
2759 pcpu_nr_populated += PFN_DOWN(size_sum);
2761 pcpu_stats_chunk_alloc();
2762 trace_percpu_create_chunk(base_addr);
2765 pcpu_base_addr = base_addr;
2770 const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
2771 [PCPU_FC_AUTO] = "auto",
2772 [PCPU_FC_EMBED] = "embed",
2773 [PCPU_FC_PAGE] = "page",
2776 enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
2778 static int __init percpu_alloc_setup(char *str)
2785 #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
2786 else if (!strcmp(str, "embed"))
2787 pcpu_chosen_fc = PCPU_FC_EMBED;
2789 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
2790 else if (!strcmp(str, "page"))
2791 pcpu_chosen_fc = PCPU_FC_PAGE;
2794 pr_warn("unknown allocator %s specified\n", str);
2798 early_param("percpu_alloc", percpu_alloc_setup);
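/*
 * Example, taken from the boot command line rather than C code: the first
 * chunk allocator can be overridden with "percpu_alloc=embed" or
 * "percpu_alloc=page", subject to the CONFIG_NEED_PER_CPU_*_FIRST_CHUNK
 * options tested above; anything else falls back to the default with a
 * warning.
 */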
2801 * pcpu_embed_first_chunk() is used by the generic percpu setup.
2802 * Build it if needed by the arch config or if the generic setup is going to be used.
2805 #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
2806 !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
2807 #define BUILD_EMBED_FIRST_CHUNK
2810 /* build pcpu_page_first_chunk() iff needed by the arch config */
2811 #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
2812 #define BUILD_PAGE_FIRST_CHUNK
2815 /* pcpu_build_alloc_info() is used by both embed and page first chunk */
2816 #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
2818 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
2819 * @reserved_size: the size of reserved percpu area in bytes
2820 * @dyn_size: minimum free size for dynamic allocation in bytes
2821 * @atom_size: allocation atom size
2822 * @cpu_distance_fn: callback to determine distance between cpus, optional
2824 * This function determines grouping of units, their mappings to cpus
2825 * and other parameters considering needed percpu size, allocation
2826 * atom size and distances between CPUs.
2828 * Groups are always multiples of atom size and CPUs which are of
2829 * LOCAL_DISTANCE both ways are grouped together and share space for
2830 * units in the same group. The returned configuration is guaranteed
2831 * to have CPUs on different nodes on different groups and >=75% usage
2832 * of allocated virtual address space.
2835 * On success, pointer to the new allocation_info is returned. On
2836 * failure, ERR_PTR value is returned.
2838 static struct pcpu_alloc_info * __init __flatten pcpu_build_alloc_info(
2839 size_t reserved_size, size_t dyn_size,
2841 pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
2843 static int group_map[NR_CPUS] __initdata;
2844 static int group_cnt[NR_CPUS] __initdata;
2845 static struct cpumask mask __initdata;
2846 const size_t static_size = __per_cpu_end - __per_cpu_start;
2847 int nr_groups = 1, nr_units = 0;
2848 size_t size_sum, min_unit_size, alloc_size;
2849 int upa, max_upa, best_upa; /* units_per_alloc */
2850 int last_allocs, group, unit;
2851 unsigned int cpu, tcpu;
2852 struct pcpu_alloc_info *ai;
2853 unsigned int *cpu_map;
2855 /* this function may be called multiple times */
2856 memset(group_map, 0, sizeof(group_map));
2857 memset(group_cnt, 0, sizeof(group_cnt));
2858 cpumask_clear(&mask);
2860 /* calculate size_sum and ensure dyn_size is enough for early alloc */
2861 size_sum = PFN_ALIGN(static_size + reserved_size +
2862 max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
2863 dyn_size = size_sum - static_size - reserved_size;
2866 * Determine min_unit_size, alloc_size and max_upa such that
2867 * alloc_size is a multiple of atom_size and is the smallest
2868 * which can accommodate 4k aligned segments which are equal to
2869 * or larger than min_unit_size.
2871 min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
2873 /* determine the maximum # of units that can fit in an allocation */
2874 alloc_size = roundup(min_unit_size, atom_size);
2875 upa = alloc_size / min_unit_size;
2876 while (alloc_size % upa || (offset_in_page(alloc_size / upa)))
2880 cpumask_copy(&mask, cpu_possible_mask);
2882 /* group cpus according to their proximity */
2883 for (group = 0; !cpumask_empty(&mask); group++) {
2884 /* pop the group's first cpu */
2885 cpu = cpumask_first(&mask);
2886 group_map[cpu] = group;
2888 cpumask_clear_cpu(cpu, &mask);
2890 for_each_cpu(tcpu, &mask) {
2891 if (!cpu_distance_fn ||
2892 (cpu_distance_fn(cpu, tcpu) == LOCAL_DISTANCE &&
2893 cpu_distance_fn(tcpu, cpu) == LOCAL_DISTANCE)) {
2894 group_map[tcpu] = group;
2896 cpumask_clear_cpu(tcpu, &mask);
2903 * Wasted space is caused by a ratio imbalance of upa to group_cnt.
2904 * Expand the unit_size until we use >= 75% of the units allocated.
2905 * Related to atom_size, which could be much larger than the unit_size.
2907 last_allocs = INT_MAX;
2909 for (upa = max_upa; upa; upa--) {
2910 int allocs = 0, wasted = 0;
2912 if (alloc_size % upa || (offset_in_page(alloc_size / upa)))
2915 for (group = 0; group < nr_groups; group++) {
2916 int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
2917 allocs += this_allocs;
2918 wasted += this_allocs * upa - group_cnt[group];
2922 * Don't accept if wastage is over 1/3. The
2923 * greater-than comparison ensures upa==1 always
2924 * passes the following check.
2926 if (wasted > num_possible_cpus() / 3)
2929 /* and then don't consume more memory */
2930 if (allocs > last_allocs)
2932 last_allocs = allocs;
2938 /* allocate and fill alloc_info */
2939 for (group = 0; group < nr_groups; group++)
2940 nr_units += roundup(group_cnt[group], upa);
2942 ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
2944 return ERR_PTR(-ENOMEM);
2945 cpu_map = ai->groups[0].cpu_map;
2947 for (group = 0; group < nr_groups; group++) {
2948 ai->groups[group].cpu_map = cpu_map;
2949 cpu_map += roundup(group_cnt[group], upa);
2952 ai->static_size = static_size;
2953 ai->reserved_size = reserved_size;
2954 ai->dyn_size = dyn_size;
2955 ai->unit_size = alloc_size / upa;
2956 ai->atom_size = atom_size;
2957 ai->alloc_size = alloc_size;
2959 for (group = 0, unit = 0; group < nr_groups; group++) {
2960 struct pcpu_group_info *gi = &ai->groups[group];
2963 * Initialize base_offset as if all groups are located
2964 * back-to-back. The caller should update this to
2965 * reflect actual allocation.
2967 gi->base_offset = unit * ai->unit_size;
2969 for_each_possible_cpu(cpu)
2970 if (group_map[cpu] == group)
2971 gi->cpu_map[gi->nr_units++] = cpu;
2972 gi->nr_units = roundup(gi->nr_units, upa);
2973 unit += gi->nr_units;
2975 BUG_ON(unit != nr_units);
2979 #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
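/*
 * Worked example for the sizing logic in pcpu_build_alloc_info() above, with
 * hypothetical numbers: for min_unit_size = 192 KiB and atom_size = 2 MiB,
 * alloc_size = roundup(192 KiB, 2 MiB) = 2 MiB and the initial guess is
 * upa = 2 MiB / 192 KiB = 10. 10 and 9 are rejected because 2 MiB is not
 * evenly divisible by them; upa = 8 divides evenly and gives a page-aligned
 * unit size of 256 KiB, so max_upa = 8. The wastage loop may then lower upa
 * further if the wasted units across all groups would exceed a third of the
 * possible CPUs.
 */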
2981 #if defined(BUILD_EMBED_FIRST_CHUNK)
2983 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
2984 * @reserved_size: the size of reserved percpu area in bytes
2985 * @dyn_size: minimum free size for dynamic allocation in bytes
2986 * @atom_size: allocation atom size
2987 * @cpu_distance_fn: callback to determine distance between cpus, optional
2988 * @alloc_fn: function to allocate percpu page
2989 * @free_fn: function to free percpu page
2991 * This is a helper to ease setting up embedded first percpu chunk and
2992 * can be called where pcpu_setup_first_chunk() is expected.
2994 * If this function is used to setup the first chunk, it is allocated
2995 * by calling @alloc_fn and used as-is without being mapped into
2996 * vmalloc area. Allocations are always whole multiples of @atom_size
2997 * aligned to @atom_size.
2999 * This enables the first chunk to piggy back on the linear physical
3000 * mapping which often uses larger page size. Please note that this
3001 * can result in very sparse cpu->unit mapping on NUMA machines thus
3002 * requiring large vmalloc address space. Don't use this allocator if
3003 * vmalloc space is not orders of magnitude larger than distances
3004 * between node memory addresses (ie. 32bit NUMA machines).
3006 * @dyn_size specifies the minimum dynamic area size.
3008 * If the needed size is smaller than the minimum or specified unit
3009 * size, the leftover is returned using @free_fn.
3012 * 0 on success, -errno on failure.
3014 int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
3016 pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
3017 pcpu_fc_alloc_fn_t alloc_fn,
3018 pcpu_fc_free_fn_t free_fn)
3020 void *base = (void *)ULONG_MAX;
3021 void **areas = NULL;
3022 struct pcpu_alloc_info *ai;
3023 size_t size_sum, areas_size;
3024 unsigned long max_distance;
3025 int group, i, highest_group, rc = 0;
3027 ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
3032 size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
3033 areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
3035 areas = memblock_alloc(areas_size, SMP_CACHE_BYTES);
3041 /* allocate, copy and determine base address & max_distance */
3043 for (group = 0; group < ai->nr_groups; group++) {
3044 struct pcpu_group_info *gi = &ai->groups[group];
3045 unsigned int cpu = NR_CPUS;
3048 for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
3049 cpu = gi->cpu_map[i];
3050 BUG_ON(cpu == NR_CPUS);
3052 /* allocate space for the whole group */
3053 ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
3056 goto out_free_areas;
3058 /* kmemleak tracks the percpu allocations separately */
3062 base = min(ptr, base);
3063 if (ptr > areas[highest_group])
3064 highest_group = group;
3066 max_distance = areas[highest_group] - base;
3067 max_distance += ai->unit_size * ai->groups[highest_group].nr_units;
3069 /* warn if maximum distance is further than 75% of vmalloc space */
3070 if (max_distance > VMALLOC_TOTAL * 3 / 4) {
3071 pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
3072 max_distance, VMALLOC_TOTAL);
3073 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
3074 /* and fail if we have fallback */
3076 goto out_free_areas;
3081 * Copy data and free unused parts. This should happen after all
3082 * allocations are complete; otherwise, we may end up with
3083 * overlapping groups.
3085 for (group = 0; group < ai->nr_groups; group++) {
3086 struct pcpu_group_info *gi = &ai->groups[group];
3087 void *ptr = areas[group];
3089 for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
3090 if (gi->cpu_map[i] == NR_CPUS) {
3091 /* unused unit, free whole */
3092 free_fn(ptr, ai->unit_size);
3095 /* copy and return the unused part */
3096 memcpy(ptr, __per_cpu_load, ai->static_size);
3097 free_fn(ptr + size_sum, ai->unit_size - size_sum);
3101 /* base address is now known, determine group base offsets */
3102 for (group = 0; group < ai->nr_groups; group++) {
3103 ai->groups[group].base_offset = areas[group] - base;
3106 pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n",
3107 PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
3108 ai->dyn_size, ai->unit_size);
3110 pcpu_setup_first_chunk(ai, base);
3114 for (group = 0; group < ai->nr_groups; group++)
3116 free_fn(areas[group],
3117 ai->groups[group].nr_units * ai->unit_size);
3119 pcpu_free_alloc_info(ai);
3121 memblock_free_early(__pa(areas), areas_size);
3124 #endif /* BUILD_EMBED_FIRST_CHUNK */
3126 #ifdef BUILD_PAGE_FIRST_CHUNK
3128 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
3129 * @reserved_size: the size of reserved percpu area in bytes
3130 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
3131 * @free_fn: function to free percpu page, always called with PAGE_SIZE
3132 * @populate_pte_fn: function to populate pte
3134 * This is a helper to ease setting up page-remapped first percpu
3135 * chunk and can be called where pcpu_setup_first_chunk() is expected.
3137 * This is the basic allocator. Static percpu area is allocated
3138 * page-by-page into vmalloc area.
3141 * 0 on success, -errno on failure.
3143 int __init pcpu_page_first_chunk(size_t reserved_size,
3144 pcpu_fc_alloc_fn_t alloc_fn,
3145 pcpu_fc_free_fn_t free_fn,
3146 pcpu_fc_populate_pte_fn_t populate_pte_fn)
3148 static struct vm_struct vm;
3149 struct pcpu_alloc_info *ai;
3153 struct page **pages;
3154 int unit, i, j, rc = 0;
3158 snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
3160 ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
3163 BUG_ON(ai->nr_groups != 1);
3164 upa = ai->alloc_size / ai->unit_size;
3165 nr_g0_units = roundup(num_possible_cpus(), upa);
3166 if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) {
3167 pcpu_free_alloc_info(ai);
3171 unit_pages = ai->unit_size >> PAGE_SHIFT;
3173 /* unaligned allocations can't be freed, round up to page size */
3174 pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
3176 pages = memblock_alloc(pages_size, SMP_CACHE_BYTES);
3178 panic("%s: Failed to allocate %zu bytes\n", __func__,
3181 /* allocate pages */
3183 for (unit = 0; unit < num_possible_cpus(); unit++) {
3184 unsigned int cpu = ai->groups[0].cpu_map[unit];
3185 for (i = 0; i < unit_pages; i++) {
3188 ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
3190 pr_warn("failed to allocate %s page for cpu%u\n",
3194 /* kmemleak tracks the percpu allocations separately */
3196 pages[j++] = virt_to_page(ptr);
3200 /* allocate vm area, map the pages and copy static data */
3201 vm.flags = VM_ALLOC;
3202 vm.size = num_possible_cpus() * ai->unit_size;
3203 vm_area_register_early(&vm, PAGE_SIZE);
3205 for (unit = 0; unit < num_possible_cpus(); unit++) {
3206 unsigned long unit_addr =
3207 (unsigned long)vm.addr + unit * ai->unit_size;
3209 for (i = 0; i < unit_pages; i++)
3210 populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
3212 /* pte already populated, the following shouldn't fail */
3213 rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
3216 panic("failed to map percpu area, err=%d\n", rc);
3219 * FIXME: Archs with virtual cache should flush local
3220 * cache for the linear mapping here - something
3221 * equivalent to flush_cache_vmap() on the local cpu.
3222 * flush_cache_vmap() can't be used as most supporting
3223 * data structures are not set up yet.
3226 /* copy static data */
3227 memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
3230 /* we're ready, commit */
3231 pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
3232 unit_pages, psize_str, ai->static_size,
3233 ai->reserved_size, ai->dyn_size);
3235 pcpu_setup_first_chunk(ai, vm.addr);
3240 free_fn(page_address(pages[j]), PAGE_SIZE);
3243 memblock_free_early(__pa(pages), pages_size);
3244 pcpu_free_alloc_info(ai);
3247 #endif /* BUILD_PAGE_FIRST_CHUNK */
3249 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
3251 * Generic SMP percpu area setup.
3253 * The embedding helper is used because its behavior closely resembles
3254 * the original non-dynamic generic percpu area setup. This is
3255 * important because many archs have addressing restrictions and might
3256 * fail if the percpu area is located far away from the previous
3257 * location. As an added bonus, in non-NUMA cases, embedding is
3258 * generally a good idea TLB-wise because percpu area can piggy back
3259 * on the physical linear memory mapping which uses large page
3260 * mappings on applicable archs.
3262 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
3263 EXPORT_SYMBOL(__per_cpu_offset);
3265 static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
3268 return memblock_alloc_from(size, align, __pa(MAX_DMA_ADDRESS));
3271 static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
3273 memblock_free_early(__pa(ptr), size);
3276 void __init setup_per_cpu_areas(void)
3278 unsigned long delta;
3283 * Always reserve area for module percpu variables. That's
3284 * what the legacy allocator did.
3286 rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
3287 PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
3288 pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
3290 panic("Failed to initialize percpu areas.");
3292 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
3293 for_each_possible_cpu(cpu)
3294 __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
3296 #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
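/*
 * Illustrative note, not a definitive description of every architecture:
 * once __per_cpu_offset[] above is filled in, generic accessors such as
 * per_cpu_ptr() and per_cpu() conceptually add the CPU's offset to the
 * percpu pointer, e.g. this CPU's copy of a static variable lives at
 * (void *)&var + __per_cpu_offset[cpu].
 */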
3298 #else /* CONFIG_SMP */
3301 * UP percpu area setup.
3303 * UP always uses km-based percpu allocator with identity mapping.
3304 * Static percpu variables are indistinguishable from the usual static
3305 * variables and don't require any special preparation.
3307 void __init setup_per_cpu_areas(void)
3309 const size_t unit_size =
3310 roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
3311 PERCPU_DYNAMIC_RESERVE));
3312 struct pcpu_alloc_info *ai;
3315 ai = pcpu_alloc_alloc_info(1, 1);
3316 fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
3318 panic("Failed to allocate memory for percpu areas.");
3319 /* kmemleak tracks the percpu allocations separately */
3322 ai->dyn_size = unit_size;
3323 ai->unit_size = unit_size;
3324 ai->atom_size = unit_size;
3325 ai->alloc_size = unit_size;
3326 ai->groups[0].nr_units = 1;
3327 ai->groups[0].cpu_map[0] = 0;
3329 pcpu_setup_first_chunk(ai, fc);
3330 pcpu_free_alloc_info(ai);
3333 #endif /* CONFIG_SMP */
3336 * pcpu_nr_pages - calculate total number of populated backing pages
3338 * This reflects the number of pages populated to back chunks. Metadata is
3339 * excluded in the number exposed in meminfo as the number of backing pages
3340 * scales with the number of cpus and can quickly outweigh the memory used for
3341 * metadata. It also keeps this calculation nice and simple.
3344 * Total number of populated backing pages in use by the allocator.
3346 unsigned long pcpu_nr_pages(void)
3348 return pcpu_nr_populated * pcpu_nr_units;
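/*
 * Example consumer: the "Percpu:" line in /proc/meminfo is derived from
 * pcpu_nr_pages(), i.e. roughly pcpu_nr_pages() * PAGE_SIZE bytes reported
 * in kB.
 */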
3352 * The percpu allocator is initialized early during boot when neither slab nor
3353 * workqueue is available. Plug async management until everything is up and running.
3356 static int __init percpu_enable_async(void)
3358 pcpu_async_enabled = true;
3361 subsys_initcall(percpu_enable_async);