/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added-on size */
	unsigned int align;	/* Alignment as calculated */
	slab_flags_t flags;	/* Active flags on the slab */
	unsigned int useroffset;/* Usercopy region offset */
	unsigned int usersize;	/* Usercopy region size */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};
#else /* !CONFIG_SLOB */

struct memcg_cache_array {
	struct rcu_head rcu;
	struct kmem_cache *entries[0];
};
/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * Both the root cache and the child caches will have it. For the root cache,
 * this will hold a dynamically allocated array large enough to hold
 * information about the currently limited memcgs in the system. To allow the
 * array to be accessed without taking any locks, on relocation we free the old
 * version only after a grace period.
 *
 * Root and child caches hold different metadata.
 *
 * @root_cache:	Common to root and child caches.  NULL for root, pointer to
 *		the root cache for children.
 *
 * The following fields are specific to root caches.
 *
 * @memcg_caches: kmemcg ID indexed table of child caches.  This table is
 *		used to index child caches during allocation and cleared
 *		early during shutdown.
 *
 * @root_caches_node: List node for slab_root_caches list.
 *
 * @children:	List of all child caches.  While the child caches are also
 *		reachable through @memcg_caches, a child cache remains on
 *		this list until it is actually destroyed.
 *
 * The following fields are specific to child caches.
 *
 * @memcg:	Pointer to the memcg this cache belongs to.
 *
 * @children_node: List node for @root_cache->children list.
 *
 * @kmem_caches_node: List node for @memcg->kmem_caches list.
 */
struct memcg_cache_params {
	struct kmem_cache *root_cache;
	union {
		struct {
			struct memcg_cache_array __rcu *memcg_caches;
			struct list_head __root_caches_node;
			struct list_head children;
			bool dying;
		};
		struct {
			struct mem_cgroup *memcg;
			struct list_head children_node;
			struct list_head kmem_caches_node;
			struct percpu_ref refcnt;

			void (*work_fn)(struct kmem_cache *);
			union {
				struct rcu_head rcu_head;
				struct work_struct work;
			};
		};
	};
};
#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>
/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;
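
/*
 * Example (illustrative sketch, not part of the original header): early code
 * can compare slab_state against these states to decide whether the slab
 * allocator is usable yet; the memblock fallback here is an assumption for
 * illustration only.
 *
 *	if (slab_state >= UP)
 *		buf = kmalloc(len, GFP_KERNEL);
 *	else
 *		buf = memblock_alloc(len, SMP_CACHE_BYTES);
 */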
/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;
/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);

gfp_t kmalloc_fix_flags(gfp_t flags);
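
/*
 * Example (sketch, modelled on the kmalloc entry points): kmalloc_slab()
 * maps a requested size to its backing cache; callers are expected to handle
 * the ZERO_SIZE_PTR/NULL results for zero-sized or oversized requests.
 *
 *	struct kmem_cache *s = kmalloc_slab(size, flags);
 *
 *	if (unlikely(ZERO_OR_NULL_PTR(s)))
 *		return s;
 *	return kmem_cache_alloc(s, flags);
 */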
/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);
int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB)
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif
/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT)
bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void __kmemcg_cache_deactivate(struct kmem_cache *s);
void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s);
void slab_kmem_cache_release(struct kmem_cache *);
void kmem_cache_shrink_all(struct kmem_cache *s);
struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);
/*
 * Generic implementation of bulk operations
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case, segments of the objects listed
 * in the array may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
static inline int cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}
#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
#endif

/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * debugging in the alloc functions.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
#ifdef CONFIG_SLUB_DEBUG
	VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (static_branch_unlikely(&slub_debug_enabled))
		return s->flags & flags;
#endif
	return false;
}
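
/*
 * Example (sketch): callers test one of the SLAB_DEBUG_FLAGS bits, as
 * cache_from_obj() below does for SLAB_CONSISTENCY_CHECKS:
 *
 *	if (kmem_cache_debug_flags(s, SLAB_STORE_USER))
 *		...record allocation/free tracking for this cache...
 */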
#ifdef CONFIG_MEMCG_KMEM

/* List of all root caches. */
extern struct list_head		slab_root_caches;
#define root_caches_node	memcg_params.__root_caches_node

/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.children, \
			    memcg_params.children_node)
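
/*
 * Example (illustrative sketch; "root" and "c" are hypothetical locals):
 * walking the children of a root cache under slab_mutex.
 *
 *	struct kmem_cache *c;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache(c, root)
 *		pr_info("child cache: %s\n", c->name);
 *	mutex_unlock(&slab_mutex);
 */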
static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params.root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}
/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, it is better to refer to them with the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}
/*
 * Expects a pointer to a slab page. Please note that the PageSlab() check
 * isn't sufficient, as it returns true also for tail compound slab pages,
 * which do not have the slab_cache pointer set.
 * So this function assumes that the page can pass the
 * PageSlab() && !PageTail() check.
 *
 * The kmem_cache can be reparented asynchronously. The caller must ensure
 * the memcg lifetime, e.g. by taking rcu_read_lock() or cgroup_mutex.
 */
static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
{
	struct kmem_cache *s;

	s = READ_ONCE(page->slab_cache);
	if (s && !is_root_cache(s))
		return READ_ONCE(s->memcg_params.memcg);

	return NULL;
}
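
/*
 * Example (sketch): a caller pinning the memcg for the duration of its use,
 * as the comment above requires:
 *
 *	rcu_read_lock();
 *	memcg = memcg_from_slab_page(page);
 *	if (memcg)
 *		...use memcg...
 *	rcu_read_unlock();
 */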
/*
 * Charge the slab page belonging to the non-root kmem_cache.
 * Can be called for non-root kmem_caches only.
 */
static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
{
	int nr_pages = 1 << order;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;
	int ret;

	rcu_read_lock();
	memcg = READ_ONCE(s->memcg_params.memcg);
	while (memcg && !css_tryget_online(&memcg->css))
		memcg = parent_mem_cgroup(memcg);
	rcu_read_unlock();

	if (unlikely(!memcg || mem_cgroup_is_root(memcg))) {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    nr_pages << PAGE_SHIFT);
		percpu_ref_get_many(&s->memcg_params.refcnt, nr_pages);
		return 0;
	}

	ret = memcg_kmem_charge(memcg, gfp, nr_pages);
	if (ret)
		goto out;

	lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page));
	mod_lruvec_state(lruvec, cache_vmstat_idx(s), nr_pages << PAGE_SHIFT);

	percpu_ref_get_many(&s->memcg_params.refcnt, nr_pages);
out:
	css_put(&memcg->css);

	return ret;
}
/*
 * Uncharge a slab page belonging to a non-root kmem_cache.
 * Can be called for non-root kmem_caches only.
 */
static __always_inline void memcg_uncharge_slab(struct page *page, int order,
						struct kmem_cache *s)
{
	int nr_pages = 1 << order;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = READ_ONCE(s->memcg_params.memcg);
	if (likely(!mem_cgroup_is_root(memcg))) {
		lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page));
		mod_lruvec_state(lruvec, cache_vmstat_idx(s),
				 -(nr_pages << PAGE_SHIFT));
		memcg_kmem_uncharge(memcg, nr_pages);
	} else {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    -(nr_pages << PAGE_SHIFT));
	}
	rcu_read_unlock();

	percpu_ref_put_many(&s->memcg_params.refcnt, nr_pages);
}
extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg);

#else /* CONFIG_MEMCG_KMEM */

/* If !memcg, all caches are root. */
#define slab_root_caches	slab_caches
#define root_caches_node	list

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )
static inline bool is_root_cache(struct kmem_cache *s)
{ return true; }

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{ return true; }

static inline const char *cache_name(struct kmem_cache *s)
{ return s->name; }

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{ return s; }

static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
{ return NULL; }

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
				    struct kmem_cache *s)
{ return 0; }

static inline void memcg_uncharge_slab(struct page *page, int order,
				       struct kmem_cache *s)
{ }

static inline void slab_init_memcg_params(struct kmem_cache *s)
{ }

static inline void memcg_link_cache(struct kmem_cache *s,
				    struct mem_cgroup *memcg)
{ }

#endif /* CONFIG_MEMCG_KMEM */
static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page;

	page = virt_to_head_page(obj);
	if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
					__func__))
		return NULL;
	return page->slab_cache;
}
static __always_inline int charge_slab_page(struct page *page,
					    gfp_t gfp, int order,
					    struct kmem_cache *s)
{
	if (is_root_cache(s)) {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    PAGE_SIZE << order);
		return 0;
	}

	return memcg_charge_slab(page, gfp, order, s);
}

static __always_inline void uncharge_slab_page(struct page *page, int order,
					       struct kmem_cache *s)
{
	if (is_root_cache(s)) {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    -(PAGE_SIZE << order));
		return;
	}

	memcg_uncharge_slab(page, order, s);
}
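
/*
 * Example (sketch, modelled on the allocators' slab page allocation paths):
 * a freshly allocated slab page is charged, and released again if the charge
 * fails; uncharge_slab_page() is the counterpart on the free path.
 *
 *	page = alloc_pages_node(node, flags, order);
 *	if (page && charge_slab_page(page, flags, order, s)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}
 */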
static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;

	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    !memcg_kmem_enabled() &&
	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
		return s;

	cachep = virt_to_cache(x);
	if (WARN(cachep && !slab_equal_or_root(cachep, s),
		  "%s: Wrong slab cache. %s but object is from %s\n",
		  __func__, s->name, cachep->name))
		print_tracking(cachep, x);
	return cachep;
}
static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}
static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     gfp_t flags)
{
	flags &= gfp_allowed_mask;

	fs_reclaim_acquire(flags);
	fs_reclaim_release(flags);

	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		return memcg_kmem_get_cache(s);

	return s;
}
static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		p[i] = kasan_slab_alloc(s, p[i], flags);
		/* As p[i] might get tagged, call kmemleak hook after KASAN. */
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
	}

	if (memcg_kmem_enabled())
		memcg_kmem_put_cache(s);
}
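
/*
 * Example (simplified sketch; the real fast paths live in the allocators):
 * the pre/post hooks bracket every allocation, and the cache returned by
 * slab_pre_alloc_hook() (possibly a memcg child cache) is the one to
 * allocate from.
 *
 *	s = slab_pre_alloc_hook(s, flags);
 *	if (!s)
 *		return NULL;
 *	object = ...allocate an object from s...;
 *	slab_post_alloc_hook(s, flags, 1, &object);
 *	return object;
 */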
#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};
static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))

#endif /* !CONFIG_SLOB */
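
/*
 * Example (sketch, SLUB-flavoured since it reads nr_partial): summing the
 * partial slabs of a cache across all nodes.
 *
 *	struct kmem_cache_node *n;
 *	unsigned long nr = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		nr += n->nr_partial;
 */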
void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
void *memcg_slab_start(struct seq_file *m, loff_t *pos);
void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
void memcg_slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);
#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);
#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */
static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}
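
/*
 * Example (sketch): the allocators consult these helpers on their alloc and
 * free paths to decide whether an object must be zeroed:
 *
 *	if (slab_want_init_on_alloc(gfpflags, s))
 *		memset(object, 0, s->object_size);
 *	...
 *	if (slab_want_init_on_free(s))
 *		memset(object, 0, s->object_size);
 */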
#endif /* MM_SLAB_H */