/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * NUMA support by Paul Mundt, 2007.
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is as little as 2 bytes, though most architectures will
 * require 4 bytes on 32-bit and 8 bytes on 64-bit.
 *
 * The slob heap is a set of linked lists of pages from alloc_pages(),
 * and within each page there is a singly-linked list of free blocks
 * (slob_t). The heap is grown on demand. To reduce fragmentation,
 * heap pages are segregated into three lists, holding objects smaller
 * than 256 bytes, objects smaller than 1024 bytes, and all other
 * objects.
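 *
 * For example, with those break points a 100-byte allocation is served
 * from the "small" list, a 512-byte allocation from the "medium" list
 * and a 2048-byte allocation from the "large" list (see SLOB_BREAK1,
 * SLOB_BREAK2 and slob_alloc() below).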
 *
 * Allocation from the heap involves first searching for a page with
 * sufficient free blocks (using a next-fit-like approach), followed by
 * a first-fit scan of the page. Deallocation inserts objects back
 * into the free list in address order, so this is effectively an
 * address-ordered first fit.
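 *
 * Because the free list is kept in address order, a freed block that is
 * physically contiguous with the free block before or after it is merged
 * back into a single larger block (see slob_free() below), which keeps
 * fragmentation down.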
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are prepended with a 4-byte header recording the kmalloc
 * size. If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 * alloc_pages() directly, allocating compound pages so the page order
 * does not have to be separately tracked. It also stores the exact
 * allocation size in page->private so that it can be used to accurately
 * provide ksize(). These objects are detected in kfree() because
 * is_slob_page() is false for them.
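 *
 * The small-object case therefore hands back memory laid out roughly as
 *
 *	|<----- align ----->|<------- size ------->|
 *	[ 4-byte size | pad ][ object given to user ]
 *
 * where align is max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN), and
 * kfree() steps back align bytes to re-read the size (see
 * __kmalloc_node() and kfree() below). For instance, on a configuration
 * where both minimums are 8 bytes, kmalloc(13, GFP_KERNEL) asks
 * slob_alloc() for 21 bytes and returns the address 8 bytes past the
 * stored header.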
 *
 * SLAB is emulated on top of SLOB by simply calling constructors and
 * destructors for every SLAB allocation. Objects are returned with
 * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
 * case the low-level allocator will fragment blocks to create the proper
 * alignment. Again, objects of page-size or greater are allocated by
 * calling alloc_pages(). As SLAB objects know their size, no separate
 * size bookkeeping is necessary and there is essentially no allocation
 * space overhead, and compound pages aren't needed for multi-page
 * allocations.
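 *
 * A cache behaves as it does under SLAB/SLUB; with illustrative names:
 *
 *	struct kmem_cache *c = kmem_cache_create("foo_cache",
 *				sizeof(struct foo), 0, 0, NULL);
 *	struct foo *obj = kmem_cache_alloc(c, GFP_KERNEL);
 *	...
 *	kmem_cache_free(c, obj);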
 *
 * NUMA support in SLOB is fairly simplistic, pushing most of the real
 * logic down to the page allocator, and simply doing the node accounting
 * on the upper levels. In the event that a node id is explicitly
 * provided, alloc_pages_exact_node() with the specified node id is used
 * instead. The common case (or when the node id isn't explicitly
 * provided) will default to the current node, as per numa_node_id().
 *
 * Node aware pages are still inserted into the global freelist, and
 * these are scanned for by matching against the node id encoded in the
 * page flags. As a result, block allocations that can be satisfied from
 * the freelist will only be served from pages residing on the same node,
 * in order to prevent random node placement.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/kmemleak.h>

#include <trace/events/kmem.h>

#include <linux/atomic.h>

/*
 * slob_block has a field 'units', which indicates size of block if +ve,
 * or offset of next block if -ve (in SLOB_UNITs).
 *
 * Free blocks of size 1 unit simply contain the offset of the next block.
 * Those with larger size contain their size in the first SLOB_UNIT of
 * memory, and the offset of the next free block in the second SLOB_UNIT.
 */
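
/*
 * For example, with 2-byte units a free block of 3 units whose successor
 * begins 10 units into the page is stored as { 3, 10 } in its first two
 * units, while a 1-unit free block with the same successor is stored as
 * the single value { -10 }.
 */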
#if PAGE_SIZE <= (32767 * 2)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif

struct slob_block {
	slobidx_t units;
};
typedef struct slob_block slob_t;

/*
 * free_slob_page: call before a slob_page is returned to the page allocator.
 */
static inline void free_slob_page(struct page *sp)
{
	reset_page_mapcount(sp);
	sp->mapping = NULL;
}

/*
 * All partially free slob pages go on these lists.
 */
#define SLOB_BREAK1 256
#define SLOB_BREAK2 1024
static LIST_HEAD(free_slob_small);
static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);

/*
 * is_slob_page: True for all slob pages (false for bigblock pages)
 */
static inline int is_slob_page(struct page *sp)
{
	return PageSlab(sp);
}

static inline void set_slob_page(struct page *sp)
{
	__SetPageSlab(sp);
}

static inline void clear_slob_page(struct page *sp)
{
	__ClearPageSlab(sp);
}

static inline struct page *slob_page(const void *addr)
{
	return virt_to_page(addr);
}

/*
 * slob_page_free: true for pages on free_slob_pages list.
 */
static inline int slob_page_free(struct page *sp)
{
	return PageSlobFree(sp);
}

static void set_slob_page_free(struct page *sp, struct list_head *list)
{
	list_add(&sp->list, list);
	__SetPageSlobFree(sp);
}

static inline void clear_slob_page_free(struct page *sp)
{
	list_del(&sp->list);
	__ClearPageSlobFree(sp);
}

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES
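
/*
 * With a 2-byte slob_t, SLOB_UNITS(100) == 50 and SLOB_UNITS(1) == 1:
 * every request is rounded up to a whole number of units, and ksize()
 * reports this rounded-up size for small kmalloc allocations.
 */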

/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
	struct rcu_head head;
	int size;
};

/*
 * slob_lock protects all slob allocator structures.
 */
static DEFINE_SPINLOCK(slob_lock);
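
/*
 * Note that this is a single global lock: every allocation and free in
 * slob_alloc()/slob_free() below serialises on it, with interrupts
 * disabled while it is held (spin_lock_irqsave).
 */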

/*
 * Encode the given size and next info into a free slob block s.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t offset = next - base;

	if (size > 1) {
		s[0].units = size;
		s[1].units = offset;
	} else
		s[0].units = -offset;
}

/*
 * Return the size of a slob block.
 */
static slobidx_t slob_units(slob_t *s)
{
	if (s->units > 0)
		return s->units;
	return 1;
}

/*
 * Return the next free slob block pointer after this one.
 */
static slob_t *slob_next(slob_t *s)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t next;

	if (s[0].units < 0)
		next = -s[0].units;
	else
		next = s[1].units;
	return base + next;
}

/*
 * Returns true if s is the last free block in its page.
 */
static int slob_last(slob_t *s)
{
	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}
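
/*
 * The free list inside a page is terminated by a block whose encoded
 * "next" pointer has no offset within a page (it points at a page
 * boundary); slob_alloc() below initialises a fresh page that way.
 */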

static void *slob_new_pages(gfp_t gfp, int order, int node)
{
	void *page;

#ifdef CONFIG_NUMA
	if (node != -1)
		page = alloc_pages_exact_node(node, gfp, order);
	else
#endif
		page = alloc_pages(gfp, order);

	if (!page)
		return NULL;

	return page_address(page);
}

static void slob_free_pages(void *b, int order)
{
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += 1 << order;
	free_pages((unsigned long)b, order);
}

/*
 * Allocate a slob block within a given slob_page sp.
 */
static void *slob_page_alloc(struct page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);

	for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			slob_t *next;

			if (delta) { /* need to fragment head to align? */
				next = slob_next(cur);
				set_slob(aligned, avail - delta, next);
				set_slob(cur, delta, aligned);
				prev = cur;
				cur = aligned;
				avail = slob_units(cur);
			}

			next = slob_next(cur);
			if (avail == units) { /* exact fit? unlink. */
				if (prev)
					set_slob(prev, slob_units(prev), next);
				else
					sp->freelist = next;
			} else { /* fragment */
				if (prev)
					set_slob(prev, slob_units(prev), cur + units);
				else
					sp->freelist = cur + units;
				set_slob(cur + units, avail - units, next);
			}

			sp->units -= units;
			if (!sp->units)
				clear_slob_page_free(sp);
			return cur;
		}
		if (slob_last(cur))
			return NULL;
	}
}

/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct page *sp;
	struct list_head *prev;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;

	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp, slob_list, list) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != -1 && page_to_nid(sp) != node)
			continue;
#endif
		/* Enough room on this page? */
		if (sp->units < SLOB_UNITS(size))
			continue;

		/* Attempt to alloc */
		prev = sp->list.prev;
		b = slob_page_alloc(sp, size, align);
		if (!b)
			continue;

		/* Improve fragment distribution and reduce our average
		 * search time by starting our next search here. (see
		 * Knuth vol 1, sec 2.5, pg 449) */
		if (prev != slob_list->prev &&
				slob_list->next != prev->next)
			list_move_tail(slob_list, prev->next);
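		/*
		 * The list head now sits just before the page we allocated
		 * from (or its successor if that page became full), so the
		 * next list_for_each_entry() starts scanning there: the
		 * next-fit behaviour described at the top of the file.
		 */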
		break;
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return NULL;
		sp = virt_to_page(b);
		set_slob_page(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->freelist = b;
		INIT_LIST_HEAD(&sp->list);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, slob_list);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely((gfp & __GFP_ZERO) && b))
		memset(b, 0, size);
	return b;
}

/*
 * slob_free: free a block back into the slob allocator.
 */
static void slob_free(void *block, int size)
{
	struct page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;
	struct list_head *slob_list;

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	BUG_ON(!size);

	sp = slob_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		/* Go directly to page allocator. Do not pass slob allocator */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		spin_unlock_irqrestore(&slob_lock, flags);
		clear_slob_page(sp);
		free_slob_page(sp);
		slob_free_pages(b, 0);
		return;
	}

	if (!slob_page_free(sp)) {
		/* This slob page is about to become partially free. Easy! */
		sp->units = units;
		sp->freelist = b;
		set_slob(b, units,
			(void *)((unsigned long)(b +
					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		if (size < SLOB_BREAK1)
			slob_list = &free_slob_small;
		else if (size < SLOB_BREAK2)
			slob_list = &free_slob_medium;
		else
			slob_list = &free_slob_large;
		set_slob_page_free(sp, slob_list);
		goto out;
	}

	/*
	 * Otherwise the page is already partially free, so find reinsertion
	 * point.
	 */
	sp->units += units;

	if (b < (slob_t *)sp->freelist) {
		if (b + units == sp->freelist) {
			units += slob_units(sp->freelist);
			sp->freelist = slob_next(sp->freelist);
		}
		set_slob(b, units, sp->freelist);
		sp->freelist = b;
	} else {
		prev = sp->freelist;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

		if (!slob_last(prev) && b + units == next) {
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

		if (prev + slob_units(prev) == b) {
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}

/*
 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
 */

void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
	unsigned int *m;
	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	void *ret;

	gfp &= gfp_allowed_mask;

	lockdep_trace_alloc(gfp);

	if (size < PAGE_SIZE - align) {
		if (!size)
			return ZERO_SIZE_PTR;

		m = slob_alloc(size + align, gfp, align, node);

		if (!m)
			return NULL;
		*m = size;
		ret = (void *)m + align;

		trace_kmalloc_node(_RET_IP_, ret,
				   size, size + align, gfp, node);
	} else {
		unsigned int order = get_order(size);

		if (likely(order))
			gfp |= __GFP_COMP;
		ret = slob_new_pages(gfp, order, node);
		if (ret) {
			struct page *page;
			page = virt_to_page(ret);
			page->private = size;
		}

		trace_kmalloc_node(_RET_IP_, ret,
				   size, PAGE_SIZE << order, gfp, node);
	}

	kmemleak_alloc(ret, size, 1, gfp);
	return ret;
}
EXPORT_SYMBOL(__kmalloc_node);

void kfree(const void *block)
{
	struct page *sp;

	trace_kfree(_RET_IP_, block);

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	kmemleak_free(block);

	sp = slob_page(block);
	if (is_slob_page(sp)) {
		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		slob_free(m, *m + align);
	} else
		put_page(sp);
}
EXPORT_SYMBOL(kfree);

/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t ksize(const void *block)
{
	struct page *sp;

	BUG_ON(!block);
	if (unlikely(block == ZERO_SIZE_PTR))
		return 0;

	sp = slob_page(block);
	if (is_slob_page(sp)) {
		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		return SLOB_UNITS(*m) * SLOB_UNIT;
	} else
		return sp->private;
}
EXPORT_SYMBOL(ksize);

struct kmem_cache {
	unsigned int size, align;
	unsigned long flags;
	const char *name;
	void (*ctor)(void *);
};

struct kmem_cache *kmem_cache_create(const char *name, size_t size,
	size_t align, unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *c;

	c = slob_alloc(sizeof(struct kmem_cache),
		GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);

	if (c) {
		c->name = name;
		c->size = size;
		if (flags & SLAB_DESTROY_BY_RCU) {
			/* leave room for rcu footer at the end of object */
			c->size += sizeof(struct slob_rcu);
		}
		c->flags = flags;
		c->ctor = ctor;
		/* ignore alignment unless it's forced */
		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
		if (c->align < ARCH_SLAB_MINALIGN)
			c->align = ARCH_SLAB_MINALIGN;
		if (c->align < align)
			c->align = align;

		kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
	} else if (flags & SLAB_PANIC)
		panic("Cannot create slab cache %s\n", name);

	return c;
}
EXPORT_SYMBOL(kmem_cache_create);

void kmem_cache_destroy(struct kmem_cache *c)
{
	kmemleak_free(c);
	if (c->flags & SLAB_DESTROY_BY_RCU)
		rcu_barrier();
	slob_free(c, sizeof(struct kmem_cache));
}
EXPORT_SYMBOL(kmem_cache_destroy);

void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
	void *b;

	flags &= gfp_allowed_mask;

	lockdep_trace_alloc(flags);

	if (c->size < PAGE_SIZE) {
		b = slob_alloc(c->size, flags, c->align, node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
					    SLOB_UNITS(c->size) * SLOB_UNIT,
					    flags, node);
	} else {
		b = slob_new_pages(flags, get_order(c->size), node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
					    PAGE_SIZE << get_order(c->size),
					    flags, node);
	}

	if (c->ctor)
		c->ctor(b);

	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
	return b;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);

static void __kmem_cache_free(void *b, int size)
{
	if (size < PAGE_SIZE)
		slob_free(b, size);
	else
		slob_free_pages(b, get_order(size));
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

	__kmem_cache_free(b, slob_rcu->size);
}
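
/*
 * For SLAB_DESTROY_BY_RCU caches, kmem_cache_create() above reserved
 * sizeof(struct slob_rcu) bytes at the end of every object; the footer
 * holds the rcu_head and size until the grace period expires, after
 * which kmem_rcu_free() releases the whole block.
 */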

void kmem_cache_free(struct kmem_cache *c, void *b)
{
	kmemleak_free_recursive(b, c->flags);
	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
		struct slob_rcu *slob_rcu;
		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
		slob_rcu->size = c->size;
		call_rcu(&slob_rcu->head, kmem_rcu_free);
	} else {
		__kmem_cache_free(b, c->size);
	}

	trace_kmem_cache_free(_RET_IP_, b);
}
EXPORT_SYMBOL(kmem_cache_free);

unsigned int kmem_cache_size(struct kmem_cache *c)
{
	return c->size;
}
EXPORT_SYMBOL(kmem_cache_size);

int kmem_cache_shrink(struct kmem_cache *d)
{
	return 0;
}
EXPORT_SYMBOL(kmem_cache_shrink);

static unsigned int slob_ready __read_mostly;

int slab_is_available(void)
{
	return slob_ready;
}

void __init kmem_cache_init(void)
{
	slob_ready = 1;
}

void __init kmem_cache_init_late(void)
{
	/* Nothing to do */
}