/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */
void __init kmem_cache_init(void);

#ifdef CONFIG_64BIT
# ifdef system_has_cmpxchg128
# define system_has_freelist_aba()	system_has_cmpxchg128()
# define try_cmpxchg_freelist	try_cmpxchg128
# endif
#define this_cpu_try_cmpxchg_freelist	this_cpu_try_cmpxchg128
typedef u128 freelist_full_t;
#else /* CONFIG_64BIT */
# ifdef system_has_cmpxchg64
# define system_has_freelist_aba()	system_has_cmpxchg64()
# define try_cmpxchg_freelist	try_cmpxchg64
# endif
#define this_cpu_try_cmpxchg_freelist	this_cpu_try_cmpxchg64
typedef u64 freelist_full_t;
#endif /* CONFIG_64BIT */

#if defined(system_has_freelist_aba) && !defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
#undef system_has_freelist_aba
#endif

/*
 * Freelist pointer and counter to cmpxchg together, avoids the typical ABA
 * problems with cmpxchg of just a pointer.
 */
typedef union {
	struct {
		void *freelist;
		unsigned long counter;
	};
	freelist_full_t full;
} freelist_aba_t;

/* Reuses the bits in struct page */
struct slab {
	unsigned long __page_flags;

#if defined(CONFIG_SLAB)

	struct kmem_cache *slab_cache;
	union {
		struct {
			struct list_head slab_list;
			void *freelist;	/* array of free object indexes */
			void *s_mem;	/* first object */
		};
		struct rcu_head rcu_head;
	};
	unsigned int active;
#elif defined(CONFIG_SLUB)

	struct kmem_cache *slab_cache;
	union {
		struct {
			union {
				struct list_head slab_list;
#ifdef CONFIG_SLUB_CPU_PARTIAL
				struct {
					struct slab *next;
					int slabs;	/* Nr of slabs left */
				};
#endif
			};
			/* Double-word boundary */
			union {
				struct {
					void *freelist;		/* first free object */
					union {
						unsigned long counters;
						struct {
							unsigned inuse:16;
							unsigned objects:15;
							unsigned frozen:1;
						};
					};
				};
#ifdef system_has_freelist_aba
				freelist_aba_t freelist_counter;
#endif
			};
		};
		struct rcu_head rcu_head;
	};
	unsigned int __unused;

#else
#error "Unexpected slab allocator configured"
#endif

	atomic_t __page_refcount;
#ifdef CONFIG_MEMCG
	unsigned long memcg_data;
#endif
};

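/*
 * Illustrative sketch, not part of the original header: SLUB updates the
 * freelist head and the counters word as a single double-word cmpxchg.
 * A bare freelist pointer could be freed and reinstalled by another CPU
 * and then look unchanged (the ABA problem); pairing it with the counters
 * word makes such reuse detectable.  With hypothetical old/new values the
 * update is roughly:
 *
 *	freelist_aba_t old = { .freelist = freelist_old, .counter = counters_old };
 *	freelist_aba_t new = { .freelist = freelist_new, .counter = counters_new };
 *
 *	if (try_cmpxchg_freelist(&slab->freelist_counter.full,
 *				 &old.full, new.full))
 *		;	// both words were swapped atomically
 */
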
#define SLAB_MATCH(pg, sl)						\
	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
SLAB_MATCH(flags, __page_flags);
SLAB_MATCH(compound_head, slab_cache);	/* Ensure bit 0 is clear */
SLAB_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
SLAB_MATCH(memcg_data, memcg_data);
#endif
#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));
#if defined(system_has_freelist_aba) && defined(CONFIG_SLUB)
static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)));
#endif

/**
 * folio_slab - Converts from folio to slab.
 * @folio: The folio.
 *
 * Currently struct slab is a different representation of a folio where
 * folio_test_slab() is true.
 *
 * Return: The slab which contains this folio.
 */
#define folio_slab(folio)	(_Generic((folio),			\
	const struct folio *:	(const struct slab *)(folio),		\
	struct folio *:		(struct slab *)(folio)))

/**
 * slab_folio - The folio allocated for a slab
 * @slab: The slab.
 *
 * Slabs are allocated as folios that contain the individual objects and are
 * using some fields in the first struct page of the folio - those fields are
 * now accessed by struct slab. It is occasionally necessary to convert back to
 * a folio in order to communicate with the rest of the mm. Please use this
 * helper function instead of casting yourself, as the implementation may change
 * in the future.
 */
#define slab_folio(s)		(_Generic((s),				\
	const struct slab *:	(const struct folio *)s,		\
	struct slab *:		(struct folio *)s))

/**
 * page_slab - Converts from first struct page to slab.
 * @p: The first (either head of compound or single) page of slab.
 *
 * A temporary wrapper to convert struct page to struct slab in situations where
 * we know the page is the compound head, or single order-0 page.
 *
 * Long-term ideally everything would work with struct slab directly or go
 * through folio to struct slab.
 *
 * Return: The slab which contains this page
 */
#define page_slab(p)		(_Generic((p),				\
	const struct page *:	(const struct slab *)(p),		\
	struct page *:		(struct slab *)(p)))

/**
 * slab_page - The first struct page allocated for a slab
 * @slab: The slab.
 *
 * A convenience wrapper for converting slab to the first struct page of the
 * underlying folio, to communicate with code not yet converted to folio or
 * struct slab.
 */
#define slab_page(s) folio_page(slab_folio(s), 0)

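/*
 * Illustrative usage sketch, not part of the original header: converting
 * between the slab, folio and page views of the same memory, assuming a
 * hypothetical caller that already holds a slab pointer.
 *
 *	struct slab *slab = ...;
 *	struct folio *folio = slab_folio(slab);	// talk to generic mm code
 *	struct page *page = slab_page(slab);	// first page of the folio
 *
 *	if (folio_test_slab(folio))
 *		slab = folio_slab(folio);	// and back again
 */
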
/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline bool slab_test_pfmemalloc(const struct slab *slab)
{
	return folio_test_active((struct folio *)slab_folio(slab));
}

static inline void slab_set_pfmemalloc(struct slab *slab)
{
	folio_set_active(slab_folio(slab));
}

static inline void slab_clear_pfmemalloc(struct slab *slab)
{
	folio_clear_active(slab_folio(slab));
}

static inline void __slab_clear_pfmemalloc(struct slab *slab)
{
	__folio_clear_active(slab_folio(slab));
}

static inline void *slab_address(const struct slab *slab)
{
	return folio_address(slab_folio(slab));
}

static inline int slab_nid(const struct slab *slab)
{
	return folio_nid(slab_folio(slab));
}

static inline pg_data_t *slab_pgdat(const struct slab *slab)
{
	return folio_pgdat(slab_folio(slab));
}

static inline struct slab *virt_to_slab(const void *addr)
{
	struct folio *folio = virt_to_folio(addr);

	if (!folio_test_slab(folio))
		return NULL;

	return folio_slab(folio);
}

static inline int slab_order(const struct slab *slab)
{
	return folio_order((struct folio *)slab_folio(slab));
}

static inline size_t slab_size(const struct slab *slab)
{
	return PAGE_SIZE << slab_order(slab);
}

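/*
 * Illustrative sketch, not part of the original header: given an object
 * address, find the slab backing it and its extent.  virt_to_slab() returns
 * NULL for memory that is not slab-backed (e.g. a large kmalloc folio).
 *
 *	struct slab *slab = virt_to_slab(obj);
 *
 *	if (slab) {
 *		void *start = slab_address(slab);	// first byte of the slab
 *		size_t bytes = slab_size(slab);		// PAGE_SIZE << order
 *		int node = slab_nid(slab);		// NUMA node it lives on
 *	}
 */
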
#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>
#include <linux/list_lru.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

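/*
 * Illustrative sketch, not part of the original header: early-boot code
 * typically gates work on how far bootstrap has progressed, e.g.
 *
 *	if (slab_state >= UP)
 *		;	// kmem_cache_create() and kmalloc() are usable here
 *
 * slab_state only advances; the allocator raises it as each bootstrap
 * stage completes.
 */
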
/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];

/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);

void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
			      int node, size_t orig_size,
			      unsigned long caller);
void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller);

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

void __init new_kmalloc_cache(int idx, enum kmalloc_cache_type type,
			      slab_flags_t flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name);

static inline bool is_kmalloc_cache(struct kmem_cache *s)
{
	return (s->flags & SLAB_KMALLOC);
}

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT | \
			  SLAB_NO_USER_FLAGS | SLAB_KMALLOC)
#else
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT | \
			      SLAB_KMALLOC | \
			      SLAB_NO_USER_FLAGS)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
static inline bool __slub_debug_enabled(void)
{
	return static_branch_unlikely(&slub_debug_enabled);
}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
static inline bool __slub_debug_enabled(void)
{
	return false;
}
#endif

/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
	if (IS_ENABLED(CONFIG_SLUB_DEBUG))
		VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (__slub_debug_enabled())
		return s->flags & flags;
	return false;
}

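/*
 * Illustrative sketch, not part of the original header: callers use the
 * helper above to keep debug-only work off the fast path unless the
 * corresponding slub_debug flag was actually enabled for the cache, e.g.
 *
 *	if (kmem_cache_debug_flags(s, SLAB_STORE_USER))
 *		;	// record or report alloc/free tracking for this cache
 *
 * Passing a flag outside SLAB_DEBUG_FLAGS trips the VM_WARN_ON_ONCE() above
 * under CONFIG_SLUB_DEBUG.
 */
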
#ifdef CONFIG_MEMCG_KMEM
/*
 * slab_objcgs - get the object cgroups vector associated with a slab
 * @slab: a pointer to the slab struct
 *
 * Returns a pointer to the object cgroups vector associated with the slab,
 * or NULL if no such vector has been associated yet.
 */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
	unsigned long memcg_data = READ_ONCE(slab->memcg_data);

	VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS),
							slab_page(slab));
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, slab_page(slab));

	return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
				 gfp_t gfp, bool new_slab);
void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
		     enum node_stat_item idx, int nr);

static inline void memcg_free_slab_cgroups(struct slab *slab)
{
	kfree(slab_objcgs(slab));
	slab->memcg_data = 0;
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
	/*
	 * For each accounted object there is an extra space which is used
	 * to store obj_cgroup membership. Charge it too.
	 */
	return s->size + sizeof(struct obj_cgroup *);
}

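/*
 * Illustrative arithmetic, not part of the original header: for a
 * hypothetical cache whose s->size is 64 bytes, each accounted object is
 * charged 64 + sizeof(struct obj_cgroup *) bytes, i.e. 72 bytes on a 64-bit
 * kernel, so the per-object obj_cgroup slot is paid for by the cgroup that
 * owns the object.
 */
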
/*
 * Returns false if the allocation should fail.
 */
static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct list_lru *lru,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	struct obj_cgroup *objcg;

	if (!memcg_kmem_online())
		return true;

	if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
		return true;

	objcg = get_obj_cgroup_from_current();
	if (!objcg)
		return true;

	if (lru) {
		int ret;
		struct mem_cgroup *memcg;

		memcg = get_mem_cgroup_from_objcg(objcg);
		ret = memcg_list_lru_alloc(memcg, lru, flags);
		css_put(&memcg->css);

		if (ret)
			goto out;
	}

	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s)))
		goto out;

	*objcgp = objcg;
	return true;
out:
	obj_cgroup_put(objcg);
	return false;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
	struct slab *slab;
	unsigned long off;
	size_t i;

	if (!memcg_kmem_online() || !objcg)
		return;

	for (i = 0; i < size; i++) {
		if (likely(p[i])) {
			slab = virt_to_slab(p[i]);

			if (!slab_objcgs(slab) &&
			    memcg_alloc_slab_cgroups(slab, s, flags,
							 false)) {
				obj_cgroup_uncharge(objcg, obj_full_size(s));
				continue;
			}

			off = obj_to_index(s, slab, p[i]);
			obj_cgroup_get(objcg);
			slab_objcgs(slab)[off] = objcg;
			mod_objcg_state(objcg, slab_pgdat(slab),
					cache_vmstat_idx(s), obj_full_size(s));
		} else {
			obj_cgroup_uncharge(objcg, obj_full_size(s));
		}
	}
	obj_cgroup_put(objcg);
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
					void **p, int objects)
{
	struct obj_cgroup **objcgs;
	int i;

	if (!memcg_kmem_online())
		return;

	objcgs = slab_objcgs(slab);
	if (!objcgs)
		return;

	for (i = 0; i < objects; i++) {
		struct obj_cgroup *objcg;
		unsigned int off;

		off = obj_to_index(s, slab, p[i]);
		objcg = objcgs[off];
		if (!objcg)
			continue;

		objcgs[off] = NULL;
		obj_cgroup_uncharge(objcg, obj_full_size(s));
		mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
				-obj_full_size(s));
		obj_cgroup_put(objcg);
	}
}

#else /* CONFIG_MEMCG_KMEM */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
	return NULL;
}

static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
{
	return NULL;
}

static inline int memcg_alloc_slab_cgroups(struct slab *slab,
					       struct kmem_cache *s, gfp_t gfp,
					       bool new_slab)
{
	return 0;
}

static inline void memcg_free_slab_cgroups(struct slab *slab)
{
}

static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct list_lru *lru,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	return true;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
					void **p, int objects)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct slab *slab;

	slab = virt_to_slab(obj);
	if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n",
					__func__))
		return NULL;
	return slab->slab_cache;
}

static __always_inline void account_slab(struct slab *slab, int order,
					 struct kmem_cache *s, gfp_t gfp)
{
	if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT))
		memcg_alloc_slab_cgroups(slab, s, gfp, true);

	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    PAGE_SIZE << order);
}

static __always_inline void unaccount_slab(struct slab *slab, int order,
					   struct kmem_cache *s)
{
	if (memcg_kmem_online())
		memcg_free_slab_cgroups(slab);

	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    -(PAGE_SIZE << order));
}

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;

	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
		return s;

	cachep = virt_to_cache(x);
	if (WARN(cachep && cachep != s,
		  "%s: Wrong slab cache. %s but object is from %s\n",
		  __func__, s->name, cachep->name))
		print_tracking(cachep, x);
	return cachep;
}

void free_large_kmalloc(struct folio *folio, void *object);

size_t __ksize(const void *objp);

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}

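/*
 * Illustrative sketch, not part of the original header: slab_ksize() is the
 * per-cache bound behind ksize().  A hypothetical caller that wants to reuse
 * the slack space of an allocation might do
 *
 *	void *p = kmalloc(40, GFP_KERNEL);
 *	size_t usable = p ? ksize(p) : 0;	// >= 40, e.g. 64 from kmalloc-64
 *
 * and may then use all 'usable' bytes; with red zoning, KASAN or user
 * tracking enabled the usable size shrinks back towards s->object_size so
 * callers cannot scribble over the guard areas.
 */
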
static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     struct list_lru *lru,
						     struct obj_cgroup **objcgp,
						     size_t size, gfp_t flags)
{
	flags &= gfp_allowed_mask;

	might_alloc(flags);

	if (should_failslab(s, flags))
		return NULL;

	if (!memcg_slab_pre_alloc_hook(s, lru, objcgp, size, flags))
		return NULL;

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s,
					struct obj_cgroup *objcg, gfp_t flags,
					size_t size, void **p, bool init,
					unsigned int orig_size)
{
	unsigned int zero_size = s->object_size;
	size_t i;

	flags &= gfp_allowed_mask;

	/*
	 * For kmalloc objects, the allocated memory size (object_size) is
	 * likely larger than the requested size (orig_size). If redzone
	 * checking is enabled for the extra space, don't zero it, as it will
	 * be redzoned soon. The redzone operation for this extra space could
	 * be seen as a replacement of current poisoning under certain debug
	 * options, and won't break other sanity checks.
	 */
	if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) &&
	    (s->flags & SLAB_KMALLOC))
		zero_size = orig_size;

	/*
	 * As memory initialization might be integrated into KASAN,
	 * kasan_slab_alloc and initialization memset must be
	 * kept together to avoid discrepancies in behavior.
	 *
	 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
	 */
	for (i = 0; i < size; i++) {
		p[i] = kasan_slab_alloc(s, p[i], flags, init);
		if (p[i] && init && !kasan_has_integrated_init())
			memset(p[i], 0, zero_size);
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
		kmsan_slab_alloc(s, p[i], flags);
	}

	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}

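/*
 * Illustrative sketch, not part of the original header, of how the two hooks
 * bracket an allocation in the allocator's alloc paths (simplified):
 *
 *	struct obj_cgroup *objcg = NULL;
 *
 *	s = slab_pre_alloc_hook(s, lru, &objcg, size, flags);
 *	if (!s)
 *		return NULL;		// failslab or memcg refused the charge
 *
 *	// ... actually allocate 'size' objects into p[] ...
 *
 *	slab_post_alloc_hook(s, objcg, flags, size, p, init, orig_size);
 */
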
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
#ifdef CONFIG_SLAB
	raw_spinlock_t list_lock;
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	spinlock_t list_lock;
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif
};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))

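/*
 * Illustrative usage sketch, not part of the original header: walking every
 * allocated per-node structure of a cache, e.g. to sum partial slabs
 * (nr_partial is the SLUB field declared above).
 *
 *	struct kmem_cache_node *n;
 *	unsigned long partial = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		partial += n->nr_partial;
 */
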
#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
				&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
				&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}

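/*
 * Illustrative sketch, not part of the original header: the decision above
 * means that with init_on_alloc enabled, a plain allocation from a cache
 * without a constructor comes back zeroed even without __GFP_ZERO, while a
 * SLAB_TYPESAFE_BY_RCU or poisoned cache is only zeroed when the caller
 * asked for it explicitly:
 *
 *	p = kmem_cache_alloc(cache, GFP_KERNEL);		// zeroed iff wanted above
 *	q = kmem_cache_alloc(cache, GFP_KERNEL | __GFP_ZERO);	// always zeroed
 */
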
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
void debugfs_slab_release(struct kmem_cache *);
#else
static inline void debugfs_slab_release(struct kmem_cache *s) { }
#endif

#ifdef CONFIG_PRINTK
#define KS_ADDRS_COUNT 16
struct kmem_obj_info {
	void *kp_ptr;
	struct slab *kp_slab;
	void *kp_objp;
	unsigned long kp_data_offset;
	struct kmem_cache *kp_slab_cache;
	void *kp_ret;
	void *kp_stack[KS_ADDRS_COUNT];
	void *kp_free_stack[KS_ADDRS_COUNT];
};
void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
#endif

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user);
#else
static inline
void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user)
{
}
#endif

#ifdef CONFIG_SLUB_DEBUG
void skip_orig_size_check(struct kmem_cache *s, const void *object);
#endif

#endif /* MM_SLAB_H */