/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added-on size */
	unsigned int align;	/* Alignment as calculated */
	slab_flags_t flags;	/* Active flags on the slab */
	unsigned int useroffset;/* Usercopy region offset */
	unsigned int usersize;	/* Usercopy region size */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#else /* !CONFIG_SLOB */

struct memcg_cache_array {
	struct rcu_head rcu;
	struct kmem_cache *entries[0];
};

/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * Both the root cache and the child caches will have it. For the root cache,
 * this will hold a dynamically allocated array large enough to hold
 * information about the currently limited memcgs in the system. To allow the
 * array to be accessed without taking any locks, on relocation we free the old
 * version only after a grace period.
 *
 * Root and child caches hold different metadata.
 *
 * @root_cache:	Common to root and child caches. NULL for root, pointer to
 *		the root cache for children.
 *
 * The following fields are specific to root caches.
 *
 * @memcg_caches: kmemcg ID indexed table of child caches. This table is
 *		used to index child caches during allocation and cleared
 *		early during shutdown.
 *
 * @root_caches_node: List node for slab_root_caches list.
 *
 * @children:	List of all child caches. While the child caches are also
 *		reachable through @memcg_caches, a child cache remains on
 *		this list until it is actually destroyed.
 *
 * The following fields are specific to child caches.
 *
 * @memcg:	Pointer to the memcg this cache belongs to.
 *
 * @children_node: List node for @root_cache->children list.
 *
 * @kmem_caches_node: List node for @memcg->kmem_caches list.
 */
struct memcg_cache_params {
	struct kmem_cache *root_cache;
	union {
		struct {
			struct memcg_cache_array __rcu *memcg_caches;
			struct list_head __root_caches_node;
			struct list_head children;
			bool dying;
		};
		struct {
			struct mem_cgroup *memcg;
			struct list_head children_node;
			struct list_head kmem_caches_node;
			struct percpu_ref refcnt;

			void (*work_fn)(struct kmem_cache *);
			union {
				struct rcu_head rcu_head;
				struct work_struct work;
			};
		};
	};
};
#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;
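
/*
 * Illustrative sketch (not part of this file): early-boot code that cannot
 * assume a fully initialized allocator can gate on slab_state. The cache
 * name below is hypothetical; kmem_cache_create() is the public API from
 * <linux/slab.h>:
 *
 *	if (slab_state >= UP)
 *		s = kmem_cache_create("late_cache", size, 0, 0, NULL);
 */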

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif
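
/*
 * Illustrative sketch of how a kmalloc() path uses the table above: a size
 * is mapped to one of the fixed-size caches, roughly:
 *
 *	struct kmem_cache *s = kmalloc_slab(size, flags);
 *
 *	if (unlikely(ZERO_OR_NULL_PTR(s)))
 *		return s;
 *	return kmem_cache_alloc(s, flags);
 *
 * A zero-sized request yields ZERO_SIZE_PTR and an oversized one NULL; the
 * exact behaviour lives in the allocator sources.
 */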

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));

#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif
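
/*
 * Illustrative sketch: kmem_cache_create() tries to merge with an existing
 * compatible cache before creating a new one, roughly:
 *
 *	s = __kmem_cache_alias(name, size, align, flags, ctor);
 *	if (s)
 *		return s;
 *
 * slab_unmergeable() and find_mergeable() implement the compatibility test;
 * under CONFIG_SLOB merging is unsupported, so the alias always fails.
 */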

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
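
/*
 * Illustrative sketch (an assumption about the creation path, not a
 * definition from this file): flags passed to kmem_cache_create() are
 * reduced to the supported set before use, e.g.:
 *
 *	flags &= CACHE_CREATE_MASK;
 */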

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void __kmemcg_cache_deactivate(struct kmem_cache *s);
void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s);
void slab_kmem_cache_release(struct kmem_cache *);
void kmem_cache_shrink_all(struct kmem_cache *s);

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations.
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the object list
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
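
/*
 * Illustrative sketch: the generic bulk free is essentially a loop over the
 * regular free path, roughly:
 *
 *	void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
 *	{
 *		size_t i;
 *
 *		for (i = 0; i < nr; i++)
 *			kmem_cache_free(s, p[i]);
 *	}
 *
 * Allocators provide their own bulk entry points only when they can batch
 * more efficiently than this.
 */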

static inline int cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
#endif

/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
#ifdef CONFIG_SLUB_DEBUG
	VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (static_branch_unlikely(&slub_debug_enabled))
		return s->flags & flags;
#endif
	return false;
}
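
/*
 * Illustrative use (sketch): a debug-only path stays cheap when no
 * debugging is enabled, e.g.:
 *
 *	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
 *		... run the expensive consistency checks ...
 *
 * The static key keeps the common no-debug case to a single patched branch.
 */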

#ifdef CONFIG_MEMCG_KMEM

/* List of all root caches. */
extern struct list_head slab_root_caches;
#define root_caches_node	memcg_params.__root_caches_node

/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.children, \
			    memcg_params.children_node)
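
/*
 * Illustrative use (sketch): walking all children of a root cache; the
 * caller must hold slab_mutex so the children list cannot change underneath:
 *
 *	struct kmem_cache *c;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache(c, root)
 *		pr_info("child cache: %s\n", c->name);
 *	mutex_unlock(&slab_mutex);
 */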

static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params.root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

static inline struct obj_cgroup **page_obj_cgroups(struct page *page)
{
	/*
	 * page->mem_cgroup and page->obj_cgroups are sharing the same
	 * space. To distinguish between them in case we don't know for sure
	 * that the page is a slab page (e.g. page_cgroup_ino()), let's
	 * always set the lowest bit of obj_cgroups.
	 */
	return (struct obj_cgroup **)
		((unsigned long)page->obj_cgroups & ~0x1UL);
}

/*
 * Expects a pointer to a slab page. Please note that the PageSlab() check
 * isn't sufficient, as it returns true also for tail compound slab pages,
 * which do not have the slab_cache pointer set.
 * So this function assumes that the page can pass the
 * PageSlab() && !PageTail() check.
 *
 * The kmem_cache can be reparented asynchronously. The caller must ensure
 * the memcg lifetime, e.g. by taking rcu_read_lock() or cgroup_mutex.
 */
static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
{
	struct kmem_cache *s;

	s = READ_ONCE(page->slab_cache);
	if (s && !is_root_cache(s))
		return READ_ONCE(s->memcg_params.memcg);

	return NULL;
}

/*
 * Charge the slab page belonging to a non-root kmem_cache.
 * Can be called for non-root kmem_caches only.
 */
static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
{
	int nr_pages = 1 << order;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;
	int ret;

	rcu_read_lock();
	memcg = READ_ONCE(s->memcg_params.memcg);
	while (memcg && !css_tryget_online(&memcg->css))
		memcg = parent_mem_cgroup(memcg);
	rcu_read_unlock();

	if (unlikely(!memcg || mem_cgroup_is_root(memcg))) {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    nr_pages << PAGE_SHIFT);
		percpu_ref_get_many(&s->memcg_params.refcnt, nr_pages);
		return 0;
	}

	ret = memcg_kmem_charge(memcg, gfp, nr_pages);
	if (ret)
		goto out;

	lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page));
	mod_lruvec_state(lruvec, cache_vmstat_idx(s), nr_pages << PAGE_SHIFT);

	percpu_ref_get_many(&s->memcg_params.refcnt, nr_pages);
out:
	css_put(&memcg->css);
	return ret;
}

/*
 * Uncharge a slab page belonging to a non-root kmem_cache.
 * Can be called for non-root kmem_caches only.
 */
static __always_inline void memcg_uncharge_slab(struct page *page, int order,
						struct kmem_cache *s)
{
	int nr_pages = 1 << order;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = READ_ONCE(s->memcg_params.memcg);
	if (likely(!mem_cgroup_is_root(memcg))) {
		lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page));
		mod_lruvec_state(lruvec, cache_vmstat_idx(s),
				 -(nr_pages << PAGE_SHIFT));
		memcg_kmem_uncharge(memcg, nr_pages);
	} else {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    -(nr_pages << PAGE_SHIFT));
	}
	rcu_read_unlock();

	percpu_ref_put_many(&s->memcg_params.refcnt, nr_pages);
}

static inline int memcg_alloc_page_obj_cgroups(struct page *page,
					       struct kmem_cache *s, gfp_t gfp)
{
	unsigned int objects = objs_per_slab_page(s, page);
	void *vec;

	vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
			   page_to_nid(page));
	if (!vec)
		return -ENOMEM;

	kmemleak_not_leak(vec);
	page->obj_cgroups = (struct obj_cgroup **) ((unsigned long)vec | 0x1UL);
	return 0;
}

static inline void memcg_free_page_obj_cgroups(struct page *page)
{
	kfree(page_obj_cgroups(page));
	page->obj_cgroups = NULL;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      size_t size, void **p)
{
	struct page *page;
	unsigned long off;
	size_t i;

	for (i = 0; i < size; i++) {
		if (likely(p[i])) {
			page = virt_to_head_page(p[i]);
			off = obj_to_index(s, page, p[i]);
			obj_cgroup_get(objcg);
			page_obj_cgroups(page)[off] = objcg;
		}
	}
	obj_cgroup_put(objcg);
	memcg_kmem_put_cache(s);
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
					void *p)
{
	struct obj_cgroup *objcg;
	unsigned int off;

	if (!memcg_kmem_enabled() || is_root_cache(s))
		return;

	off = obj_to_index(s, page, p);
	objcg = page_obj_cgroups(page)[off];
	page_obj_cgroups(page)[off] = NULL;
	obj_cgroup_put(objcg);
}

extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg);

#else /* CONFIG_MEMCG_KMEM */

/* If !memcg, all caches are root. */
#define slab_root_caches	slab_caches
#define root_caches_node	list

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
{
	return NULL;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
				    struct kmem_cache *s)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
				       struct kmem_cache *s)
{
}

static inline int memcg_alloc_page_obj_cgroups(struct page *page,
					       struct kmem_cache *s, gfp_t gfp)
{
	return 0;
}

static inline void memcg_free_page_obj_cgroups(struct page *page)
{
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      size_t size, void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
					void *p)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_link_cache(struct kmem_cache *s,
				    struct mem_cgroup *memcg)
{
}

#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page;

	page = virt_to_head_page(obj);
	if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
					__func__))
		return NULL;
	return page->slab_cache;
}
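
/*
 * Illustrative use (sketch), as in a kfree()-style path that must find the
 * owning cache from a bare pointer:
 *
 *	struct kmem_cache *s = virt_to_cache(ptr);
 *
 *	if (s)
 *		kmem_cache_free(s, ptr);
 */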

static __always_inline int charge_slab_page(struct page *page,
					    gfp_t gfp, int order,
					    struct kmem_cache *s)
{
	int ret;

	if (is_root_cache(s)) {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    PAGE_SIZE << order);
		return 0;
	}

	ret = memcg_alloc_page_obj_cgroups(page, s, gfp);
	if (ret)
		return ret;

	return memcg_charge_slab(page, gfp, order, s);
}

static __always_inline void uncharge_slab_page(struct page *page, int order,
					       struct kmem_cache *s)
{
	if (is_root_cache(s)) {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    -(PAGE_SIZE << order));
		return;
	}

	memcg_free_page_obj_cgroups(page);
	memcg_uncharge_slab(page, order, s);
}

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;

	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    !memcg_kmem_enabled() &&
	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
		return s;

	cachep = virt_to_cache(x);
	if (WARN(cachep && !slab_equal_or_root(cachep, s),
		  "%s: Wrong slab cache. %s but object is from %s\n",
		  __func__, s->name, cachep->name))
		print_tracking(cachep, x);
	return cachep;
}

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     struct obj_cgroup **objcgp,
						     size_t size, gfp_t flags)
{
	flags &= gfp_allowed_mask;

	fs_reclaim_acquire(flags);
	fs_reclaim_release(flags);

	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		return memcg_kmem_get_cache(s, objcgp);

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s,
					struct obj_cgroup *objcg,
					gfp_t flags, size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		p[i] = kasan_slab_alloc(s, p[i], flags);
		/* As p[i] might get tagged, call kmemleak hook after KASAN. */
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
	}

	if (memcg_kmem_enabled() && !is_root_cache(s))
		memcg_slab_post_alloc_hook(s, objcg, size, p);
}
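
/*
 * Illustrative sketch of how the two hooks pair up around an allocation
 * (assumes p[] holds size freshly allocated objects in between):
 *
 *	struct obj_cgroup *objcg = NULL;
 *
 *	s = slab_pre_alloc_hook(s, &objcg, size, flags);
 *	if (!s)
 *		return NULL;
 *	... allocate size objects from s into p[] ...
 *	slab_post_alloc_hook(s, objcg, flags, size, p);
 */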

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes).
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))

#endif /* CONFIG_SLOB */
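
/*
 * Illustrative use (sketch): aggregating a per-node counter over all nodes
 * of a cache. Which field is meaningful depends on the allocator; nr_slabs
 * below assumes SLUB with CONFIG_SLUB_DEBUG:
 *
 *	struct kmem_cache_node *n;
 *	unsigned long nr = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		nr += atomic_long_read(&n->nr_slabs);
 */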

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
void *memcg_slab_start(struct seq_file *m, loff_t *pos);
void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
void memcg_slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}
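
/*
 * Illustrative use (sketch), as in an allocation fast path:
 *
 *	if (slab_want_init_on_alloc(gfpflags, s) && object)
 *		memset(object, 0, s->object_size);
 */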

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}

#endif /* MM_SLAB_H */