1 /* SPDX-License-Identifier: GPL-2.0 */
5 * Internal slab definitions
10 * Common fields provided in kmem_cache by all slab allocators
11 * This struct is either used directly by the allocator (SLOB)
12 * or the allocator must include definitions for all fields
13 * provided in kmem_cache_common in their definition of kmem_cache.
15 * Once we can do anonymous structs (C11 standard) we could put an
16 * anonymous struct definition in these allocators so that the
17 * separate definitions of these fields in the kmem_cache structures of
18 * SLAB and SLUB are no longer needed (a sketch follows the field list below).
21 unsigned int object_size;/* The original size of the object */
22 unsigned int size; /* The aligned/padded/added on size */
23 unsigned int align; /* Alignment as calculated */
24 slab_flags_t flags; /* Active flags on the slab */
25 unsigned int useroffset;/* Usercopy region offset */
26 unsigned int usersize; /* Usercopy region size */
27 const char *name; /* Slab name for sysfs */
28 int refcount; /* Use counter */
29 void (*ctor)(void *); /* Called on object slot creation */
30 struct list_head list; /* List of all slab caches on the system */
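/*
 * Illustrative sketch only (not part of this header): with C11 anonymous
 * structs, the common fields above could come from one shared definition
 * (here a hypothetical KMEM_CACHE_COMMON_FIELDS macro) embedded by each
 * allocator:
 *
 *	#define KMEM_CACHE_COMMON_FIELDS		\
 *		struct {				\
 *			unsigned int object_size;	\
 *			unsigned int size;		\
 *			slab_flags_t flags;		\
 *		}
 *
 *	struct kmem_cache {
 *		KMEM_CACHE_COMMON_FIELDS;	// C11 anonymous struct member
 *		// allocator-specific fields follow
 *	};
 *
 * Accesses such as s->object_size would keep working unchanged.
 */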
33 #else /* !CONFIG_SLOB */
35 struct memcg_cache_array {
37 struct kmem_cache *entries[0];
41 * This is the main placeholder for memcg-related information in kmem caches.
42 * Both the root cache and the child caches will have it. For the root cache,
43 * this will hold a dynamically allocated array large enough to hold
44 * information about the currently limited memcgs in the system. To allow the
45 * array to be accessed without taking any locks, on relocation we free the old
46 * version only after a grace period.
48 * Root and child caches hold different metadata.
50 * @root_cache: Common to root and child caches. NULL for root, pointer to
51 * the root cache for children.
53 * The following fields are specific to root caches.
55 * @memcg_caches: kmemcg ID indexed table of child caches. This table is
56 * used to look up child caches during allocation and is cleared
57 * early during shutdown.
59 * @root_caches_node: List node for slab_root_caches list.
61 * @children: List of all child caches. While the child caches are also
62 * reachable through @memcg_caches, a child cache remains on
63 * this list until it is actually destroyed.
65 * The following fields are specific to child caches.
67 * @memcg: Pointer to the memcg this cache belongs to.
69 * @children_node: List node for @root_cache->children list.
71 * @kmem_caches_node: List node for @memcg->kmem_caches list.
73 struct memcg_cache_params {
74 struct kmem_cache *root_cache;
77 struct memcg_cache_array __rcu *memcg_caches;
78 struct list_head __root_caches_node;
79 struct list_head children;
83 struct mem_cgroup *memcg;
84 struct list_head children_node;
85 struct list_head kmem_caches_node;
86 struct percpu_ref refcnt;
88 void (*work_fn)(struct kmem_cache *);
90 struct rcu_head rcu_head;
91 struct work_struct work;
96 #endif /* CONFIG_SLOB */
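/*
 * Illustrative sketch (not kernel code): a lockless reader could resolve a
 * child cache through the RCU-protected memcg_caches array described above,
 * where "kmemcg_id" is a hypothetical index:
 *
 *	struct memcg_cache_array *arr;
 *	struct kmem_cache *child;
 *
 *	rcu_read_lock();
 *	arr = rcu_dereference(root->memcg_params.memcg_caches);
 *	child = arr ? READ_ONCE(arr->entries[kmemcg_id]) : NULL;
 *	rcu_read_unlock();
 *
 * Freeing the old array only after a grace period is what keeps a reader's
 * previously fetched pointer valid while the array is being replaced.
 */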
99 #include <linux/slab_def.h>
103 #include <linux/slub_def.h>
106 #include <linux/memcontrol.h>
107 #include <linux/fault-inject.h>
108 #include <linux/kasan.h>
109 #include <linux/kmemleak.h>
110 #include <linux/random.h>
111 #include <linux/sched/mm.h>
112 #include <linux/kmemleak.h>
115 * State of the slab allocator.
117 * This is used to describe the states of the allocator during bootup.
118 * Allocators use this to gradually bootstrap themselves. Most allocators
119 * have the problem that the structures used for managing slab caches are
120 * allocated from slab caches themselves.
123 DOWN, /* No slab functionality yet */
124 PARTIAL, /* SLUB: kmem_cache_node available */
125 PARTIAL_NODE, /* SLAB: kmalloc size for node struct available */
126 UP, /* Slab caches usable but not all extras yet */
127 FULL /* Everything is working */
130 extern enum slab_state slab_state;
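/*
 * Example (illustrative): code that can run very early during boot may gate
 * its slab usage on slab_state, e.g.:
 *
 *	if (slab_state >= UP)
 *		my_cache = kmem_cache_create("my_cache", 128, 0, 0, NULL);
 *
 * "my_cache" is a hypothetical cache used only for this example.
 */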
132 /* The slab cache mutex protects the management structures during changes */
133 extern struct mutex slab_mutex;
135 /* The list of all slab caches on the system */
136 extern struct list_head slab_caches;
138 /* The slab cache that manages slab cache information */
139 extern struct kmem_cache *kmem_cache;
141 /* A table of kmalloc cache names and sizes */
142 extern const struct kmalloc_info_struct {
143 const char *name[NR_KMALLOC_TYPES];
148 /* Kmalloc array related functions */
149 void setup_kmalloc_cache_index_table(void);
150 void create_kmalloc_caches(slab_flags_t);
152 /* Find the kmalloc slab corresponding to a given size */
153 struct kmem_cache *kmalloc_slab(size_t, gfp_t);
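/*
 * Example (illustrative): mapping a request size to its backing cache:
 *
 *	struct kmem_cache *s = kmalloc_slab(64, GFP_KERNEL);
 *
 * On a typical configuration this returns the "kmalloc-64" cache.
 */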
156 gfp_t kmalloc_fix_flags(gfp_t flags);
158 /* Functions provided by the slab allocators */
159 int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);
161 struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
162 slab_flags_t flags, unsigned int useroffset,
163 unsigned int usersize);
164 extern void create_boot_cache(struct kmem_cache *, const char *name,
165 unsigned int size, slab_flags_t flags,
166 unsigned int useroffset, unsigned int usersize);
168 int slab_unmergeable(struct kmem_cache *s);
169 struct kmem_cache *find_mergeable(unsigned size, unsigned align,
170 slab_flags_t flags, const char *name, void (*ctor)(void *));
173 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
174 slab_flags_t flags, void (*ctor)(void *));
176 slab_flags_t kmem_cache_flags(unsigned int object_size,
177 slab_flags_t flags, const char *name,
178 void (*ctor)(void *));
180 static inline struct kmem_cache *
181 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
182 slab_flags_t flags, void (*ctor)(void *))
185 static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
186 slab_flags_t flags, const char *name,
187 void (*ctor)(void *))
194 /* Legal flag mask for kmem_cache_create(), for various configurations */
195 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
196 SLAB_CACHE_DMA32 | SLAB_PANIC | \
197 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)
199 #if defined(CONFIG_DEBUG_SLAB)
200 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
201 #elif defined(CONFIG_SLUB_DEBUG)
202 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
203 SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
205 #define SLAB_DEBUG_FLAGS (0)
208 #if defined(CONFIG_SLAB)
209 #define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
210 SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
212 #elif defined(CONFIG_SLUB)
213 #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
214 SLAB_TEMPORARY | SLAB_ACCOUNT)
216 #define SLAB_CACHE_FLAGS (0)
219 /* Common flags available with current configuration */
220 #define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
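/*
 * Example (illustrative): creation paths can mask caller-supplied flags down
 * to what the current configuration actually supports:
 *
 *	flags &= CACHE_CREATE_MASK;
 */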
222 /* Common flags permitted for kmem_cache_create */
223 #define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
228 SLAB_CONSISTENCY_CHECKS | \
231 SLAB_RECLAIM_ACCOUNT | \
235 bool __kmem_cache_empty(struct kmem_cache *);
236 int __kmem_cache_shutdown(struct kmem_cache *);
237 void __kmem_cache_release(struct kmem_cache *);
238 int __kmem_cache_shrink(struct kmem_cache *);
239 void __kmemcg_cache_deactivate(struct kmem_cache *s);
240 void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s);
241 void slab_kmem_cache_release(struct kmem_cache *);
242 void kmem_cache_shrink_all(struct kmem_cache *s);
248 unsigned long active_objs;
249 unsigned long num_objs;
250 unsigned long active_slabs;
251 unsigned long num_slabs;
252 unsigned long shared_avail;
254 unsigned int batchcount;
256 unsigned int objects_per_slab;
257 unsigned int cache_order;
260 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
261 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
262 ssize_t slabinfo_write(struct file *file, const char __user *buffer,
263 size_t count, loff_t *ppos);
266 * Generic implementation of bulk operations.
267 * These are useful for situations in which the allocator cannot
268 * perform optimizations. In that case segments of the object list
269 * may be allocated or freed using these operations.
271 void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
272 int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
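/*
 * Minimal sketch (illustrative) of such a fallback on the free side, one
 * object at a time:
 *
 *	size_t i;
 *
 *	for (i = 0; i < nr; i++)
 *		kmem_cache_free(s, p[i]);
 *
 * A fuller version might also fall back to kfree() when no cache is given.
 */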
274 static inline int cache_vmstat_idx(struct kmem_cache *s)
276 return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
277 NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
280 #ifdef CONFIG_SLUB_DEBUG
281 #ifdef CONFIG_SLUB_DEBUG_ON
282 DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
284 DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
286 extern void print_tracking(struct kmem_cache *s, void *object);
288 static inline void print_tracking(struct kmem_cache *s, void *object)
294 * Returns true if any of the specified slub_debug flags is enabled for the
295 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
296 * the static key.
298 static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
300 #ifdef CONFIG_SLUB_DEBUG
301 VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
302 if (static_branch_unlikely(&slub_debug_enabled))
303 return s->flags & flags;
308 #ifdef CONFIG_MEMCG_KMEM
310 /* List of all root caches. */
311 extern struct list_head slab_root_caches;
312 #define root_caches_node memcg_params.__root_caches_node
315 * Iterate over all memcg caches of the given root cache. The caller must hold
316 * slab_mutex.
318 #define for_each_memcg_cache(iter, root) \
319 list_for_each_entry(iter, &(root)->memcg_params.children, \
320 memcg_params.children_node)
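/*
 * Example (illustrative): with slab_mutex held, the children of a root cache
 * "root" (a hypothetical pointer) can be walked like this:
 *
 *	struct kmem_cache *c;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache(c, root)
 *		pr_info("child cache: %s\n", c->name);
 *	mutex_unlock(&slab_mutex);
 */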
322 static inline bool is_root_cache(struct kmem_cache *s)
324 return !s->memcg_params.root_cache;
327 static inline bool slab_equal_or_root(struct kmem_cache *s,
328 struct kmem_cache *p)
330 return p == s || p == s->memcg_params.root_cache;
334 * We append suffixes to the cache name in memcg because we can't have
335 * caches created in the system with the same name. But when we print them
336 * locally, it is better to refer to them by the base name.
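* For example, a child of "dentry" carries a memcg-specific suffix in its
* globally visible name, but cache_name() reports it simply as "dentry".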
338 static inline const char *cache_name(struct kmem_cache *s)
340 if (!is_root_cache(s))
341 s = s->memcg_params.root_cache;
345 static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
347 if (is_root_cache(s))
349 return s->memcg_params.root_cache;
352 static inline struct obj_cgroup **page_obj_cgroups(struct page *page)
355 * page->mem_cgroup and page->obj_cgroups share the same
356 * space. To distinguish between them in case we don't know for sure
357 * that the page is a slab page (e.g. in page_cgroup_ino()), let's
358 * always set the lowest bit of obj_cgroups.
360 return (struct obj_cgroup **)
361 ((unsigned long)page->obj_cgroups & ~0x1UL);
365 * Expects a pointer to a slab page. Please note that the PageSlab() check
366 * isn't sufficient, as it returns true also for tail compound slab pages,
367 * which do not have the slab_cache pointer set.
368 * So this function assumes that the page can pass the PageSlab() && !PageTail()
369 * check.
371 * The kmem_cache can be reparented asynchronously. The caller must ensure
372 * the memcg lifetime, e.g. by taking rcu_read_lock() or cgroup_mutex.
374 static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
376 struct kmem_cache *s;
378 s = READ_ONCE(page->slab_cache);
379 if (s && !is_root_cache(s))
380 return READ_ONCE(s->memcg_params.memcg);
385 static inline int memcg_alloc_page_obj_cgroups(struct page *page,
386 struct kmem_cache *s, gfp_t gfp)
388 unsigned int objects = objs_per_slab_page(s, page);
391 vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
396 kmemleak_not_leak(vec);
397 page->obj_cgroups = (struct obj_cgroup **) ((unsigned long)vec | 0x1UL);
401 static inline void memcg_free_page_obj_cgroups(struct page *page)
403 kfree(page_obj_cgroups(page));
404 page->obj_cgroups = NULL;
407 static inline size_t obj_full_size(struct kmem_cache *s)
410 * For each accounted object there is extra space which is used
411 * to store its obj_cgroup membership. Charge it too.
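* For example, with s->size of 64 bytes on a 64-bit kernel this charges
* 64 + sizeof(struct obj_cgroup *) = 72 bytes per accounted object.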
413 return s->size + sizeof(struct obj_cgroup *);
416 static inline struct kmem_cache *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
417 struct obj_cgroup **objcgp,
418 size_t objects, gfp_t flags)
420 struct kmem_cache *cachep;
422 cachep = memcg_kmem_get_cache(s, objcgp);
423 if (is_root_cache(cachep))
426 if (obj_cgroup_charge(*objcgp, flags, objects * obj_full_size(s))) {
427 obj_cgroup_put(*objcgp);
428 memcg_kmem_put_cache(cachep);
435 static inline void mod_objcg_state(struct obj_cgroup *objcg,
436 struct pglist_data *pgdat,
439 struct mem_cgroup *memcg;
440 struct lruvec *lruvec;
443 memcg = obj_cgroup_memcg(objcg);
444 lruvec = mem_cgroup_lruvec(memcg, pgdat);
445 mod_memcg_lruvec_state(lruvec, idx, nr);
449 static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
450 struct obj_cgroup *objcg,
451 size_t size, void **p)
457 for (i = 0; i < size; i++) {
459 page = virt_to_head_page(p[i]);
460 off = obj_to_index(s, page, p[i]);
461 obj_cgroup_get(objcg);
462 page_obj_cgroups(page)[off] = objcg;
463 mod_objcg_state(objcg, page_pgdat(page),
464 cache_vmstat_idx(s), obj_full_size(s));
466 obj_cgroup_uncharge(objcg, obj_full_size(s));
469 obj_cgroup_put(objcg);
470 memcg_kmem_put_cache(s);
473 static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
476 struct obj_cgroup *objcg;
479 if (!memcg_kmem_enabled() || is_root_cache(s))
482 off = obj_to_index(s, page, p);
483 objcg = page_obj_cgroups(page)[off];
484 page_obj_cgroups(page)[off] = NULL;
486 obj_cgroup_uncharge(objcg, obj_full_size(s));
487 mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
490 obj_cgroup_put(objcg);
493 extern void slab_init_memcg_params(struct kmem_cache *);
494 extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg);
496 #else /* CONFIG_MEMCG_KMEM */
498 /* If !memcg, all caches are root. */
499 #define slab_root_caches slab_caches
500 #define root_caches_node list
502 #define for_each_memcg_cache(iter, root) \
503 for ((void)(iter), (void)(root); 0; )
505 static inline bool is_root_cache(struct kmem_cache *s)
510 static inline bool slab_equal_or_root(struct kmem_cache *s,
511 struct kmem_cache *p)
516 static inline const char *cache_name(struct kmem_cache *s)
521 static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
526 static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
531 static inline int memcg_alloc_page_obj_cgroups(struct page *page,
532 struct kmem_cache *s, gfp_t gfp)
537 static inline void memcg_free_page_obj_cgroups(struct page *page)
541 static inline struct kmem_cache *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
542 struct obj_cgroup **objcgp,
543 size_t objects, gfp_t flags)
548 static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
549 struct obj_cgroup *objcg,
550 size_t size, void **p)
554 static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
559 static inline void slab_init_memcg_params(struct kmem_cache *s)
563 static inline void memcg_link_cache(struct kmem_cache *s,
564 struct mem_cgroup *memcg)
568 #endif /* CONFIG_MEMCG_KMEM */
570 static inline struct kmem_cache *virt_to_cache(const void *obj)
574 page = virt_to_head_page(obj);
575 if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
578 return page->slab_cache;
581 static __always_inline int charge_slab_page(struct page *page,
582 gfp_t gfp, int order,
583 struct kmem_cache *s)
585 #ifdef CONFIG_MEMCG_KMEM
586 if (memcg_kmem_enabled() && !is_root_cache(s)) {
589 ret = memcg_alloc_page_obj_cgroups(page, s, gfp);
593 percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
596 mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
601 static __always_inline void uncharge_slab_page(struct page *page, int order,
602 struct kmem_cache *s)
604 #ifdef CONFIG_MEMCG_KMEM
605 if (memcg_kmem_enabled() && !is_root_cache(s)) {
606 memcg_free_page_obj_cgroups(page);
607 percpu_ref_put_many(&s->memcg_params.refcnt, 1 << order);
610 mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
611 -(PAGE_SIZE << order));
614 static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
616 struct kmem_cache *cachep;
618 if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
619 !memcg_kmem_enabled() &&
620 !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
623 cachep = virt_to_cache(x);
624 if (WARN(cachep && !slab_equal_or_root(cachep, s),
625 "%s: Wrong slab cache. %s but object is from %s\n",
626 __func__, s->name, cachep->name))
627 print_tracking(cachep, x);
631 static inline size_t slab_ksize(const struct kmem_cache *s)
634 return s->object_size;
636 #else /* CONFIG_SLUB */
637 # ifdef CONFIG_SLUB_DEBUG
639 * Debugging requires use of the padding between the object
640 * and whatever may come after it.
642 if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
643 return s->object_size;
645 if (s->flags & SLAB_KASAN)
646 return s->object_size;
648 * If we need to store the freelist pointer
649 * back there or track user information then we can
650 * only use the space before that information.
652 if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
655 * Else we can use all the padding etc for the allocation
661 static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
662 struct obj_cgroup **objcgp,
663 size_t size, gfp_t flags)
665 flags &= gfp_allowed_mask;
667 fs_reclaim_acquire(flags);
668 fs_reclaim_release(flags);
670 might_sleep_if(gfpflags_allow_blocking(flags));
672 if (should_failslab(s, flags))
675 if (memcg_kmem_enabled() &&
676 ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
677 return memcg_slab_pre_alloc_hook(s, objcgp, size, flags);
682 static inline void slab_post_alloc_hook(struct kmem_cache *s,
683 struct obj_cgroup *objcg,
684 gfp_t flags, size_t size, void **p)
688 flags &= gfp_allowed_mask;
689 for (i = 0; i < size; i++) {
690 p[i] = kasan_slab_alloc(s, p[i], flags);
691 /* As p[i] might get tagged, call kmemleak hook after KASAN. */
692 kmemleak_alloc_recursive(p[i], s->object_size, 1,
696 if (memcg_kmem_enabled() && !is_root_cache(s))
697 memcg_slab_post_alloc_hook(s, objcg, size, p);
702 * The slab lists for all objects.
704 struct kmem_cache_node {
705 spinlock_t list_lock;
708 struct list_head slabs_partial; /* partial list first, better asm code */
709 struct list_head slabs_full;
710 struct list_head slabs_free;
711 unsigned long total_slabs; /* length of all slab lists */
712 unsigned long free_slabs; /* length of free slab list only */
713 unsigned long free_objects;
714 unsigned int free_limit;
715 unsigned int colour_next; /* Per-node cache coloring */
716 struct array_cache *shared; /* shared per node */
717 struct alien_cache **alien; /* on other nodes */
718 unsigned long next_reap; /* updated without locking */
719 int free_touched; /* updated without locking */
723 unsigned long nr_partial;
724 struct list_head partial;
725 #ifdef CONFIG_SLUB_DEBUG
726 atomic_long_t nr_slabs;
727 atomic_long_t total_objects;
728 struct list_head full;
734 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
736 return s->node[node];
740 * Iterator over all nodes. The body will be executed for each node that has
741 * a kmem_cache_node structure allocated (which is true for all online nodes)
743 #define for_each_kmem_cache_node(__s, __node, __n) \
744 for (__node = 0; __node < nr_node_ids; __node++) \
745 if ((__n = get_node(__s, __node)))
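/*
 * Example (illustrative): summing the partial-list lengths of a SLUB cache
 * across all nodes:
 *
 *	struct kmem_cache_node *n;
 *	unsigned long partial = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		partial += n->nr_partial;
 */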
749 void *slab_start(struct seq_file *m, loff_t *pos);
750 void *slab_next(struct seq_file *m, void *p, loff_t *pos);
751 void slab_stop(struct seq_file *m, void *p);
752 void *memcg_slab_start(struct seq_file *m, loff_t *pos);
753 void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
754 void memcg_slab_stop(struct seq_file *m, void *p);
755 int memcg_slab_show(struct seq_file *m, void *p);
757 #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
758 void dump_unreclaimable_slab(void);
760 static inline void dump_unreclaimable_slab(void)
765 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);
767 #ifdef CONFIG_SLAB_FREELIST_RANDOM
768 int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
770 void cache_random_seq_destroy(struct kmem_cache *cachep);
772 static inline int cache_random_seq_create(struct kmem_cache *cachep,
773 unsigned int count, gfp_t gfp)
777 static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
778 #endif /* CONFIG_SLAB_FREELIST_RANDOM */
780 static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
782 if (static_branch_unlikely(&init_on_alloc)) {
785 if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
786 return flags & __GFP_ZERO;
789 return flags & __GFP_ZERO;
792 static inline bool slab_want_init_on_free(struct kmem_cache *c)
794 if (static_branch_unlikely(&init_on_free))
796 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
800 #endif /* MM_SLAB_H */