#define BATCHREFILL_LIMIT 16
/*
- * Optimization question: fewer reaps means less probability for unnessary
+ * Optimization question: fewer reaps means less probability for unnecessary
* cpucache drain/refill cycles.
*
* OTOH the cpuarrays can contain lots of objects,
* which could lock up otherwise freeable slabs.
*/
/*
* Because if it is the case, that means we defer the creation of
* the kmalloc_{dma,}_cache of size sizeof(slab descriptor) to this point.
* And we eventually call down to __kmem_cache_create(), which
- * in turn looks up in the kmalloc_{dma,}_caches for the disired-size one.
+ * in turn looks up in the kmalloc_{dma,}_caches for the desired-size one.
* This is a "chicken-and-egg" problem.
*
* So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches,
};
/*
- * Initialize the state based on the randomization methode available.
- * return true if the pre-computed list is available, false otherwize.
+ * Initialize the state based on the randomization method available.
+ * return true if the pre-computed list is available, false otherwise.
*/
static bool freelist_state_initialize(union freelist_init_state *state,
struct kmem_cache *cachep,
void *ptr;
int slab_node = numa_mem_id();
struct obj_cgroup *objcg = NULL;
+ bool init = false;
flags &= gfp_allowed_mask;
cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags);
out:
local_irq_restore(save_flags);
ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
-
- if (unlikely(slab_want_init_on_alloc(flags, cachep)) && ptr)
- memset(ptr, 0, cachep->object_size);
+ init = slab_want_init_on_alloc(flags, cachep);
out_hooks:
- slab_post_alloc_hook(cachep, objcg, flags, 1, &ptr);
+ slab_post_alloc_hook(cachep, objcg, flags, 1, &ptr, init);
return ptr;
}
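/*
 * For context, a sketch (not part of this hunk) of the slab_post_alloc_hook()
 * counterpart in mm/slab.h that consumes the new "init" argument. The body
 * below is an approximation; the helpers it names (kasan_slab_alloc with an
 * init parameter, kasan_has_integrated_init, kmemleak_alloc_recursive,
 * memcg_slab_post_alloc_hook) are assumed from the rest of this series.
 */
static inline void slab_post_alloc_hook(struct kmem_cache *s,
					struct obj_cgroup *objcg, gfp_t flags,
					size_t size, void **p, bool init)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		/* KASAN may tag the pointer and, with HW_TAGS, also zero the object. */
		p[i] = kasan_slab_alloc(s, p[i], flags, init);
		/* Only memset when KASAN did not already initialize the memory. */
		if (p[i] && init && !kasan_has_integrated_init())
			memset(p[i], 0, s->object_size);
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
	}
	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}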
unsigned long save_flags;
void *objp;
struct obj_cgroup *objcg = NULL;
+ bool init = false;
flags &= gfp_allowed_mask;
cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags);
local_irq_restore(save_flags);
objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
prefetchw(objp);
-
- if (unlikely(slab_want_init_on_alloc(flags, cachep)) && objp)
- memset(objp, 0, cachep->object_size);
+ init = slab_want_init_on_alloc(flags, cachep);
out:
- slab_post_alloc_hook(cachep, objcg, flags, 1, &objp);
+ slab_post_alloc_hook(cachep, objcg, flags, 1, &objp, init);
return objp;
}
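/*
 * Note on the pattern in both allocation paths above: "init" starts out false
 * and is only set on the regular allocation path, right before the final
 * label. Any early exit that jumps straight to that label (out_hooks: in the
 * first function, out: in the second) therefore reaches slab_post_alloc_hook()
 * with init == false, so objects that did not come through the normal slab
 * path are not zeroed here. Presumably these early exits cover failed and
 * KFENCE-handled allocations; that detail is outside this hunk.
 */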
static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
unsigned long caller)
{
+ bool init;
+
if (is_kfence_address(objp)) {
kmemleak_free_recursive(objp, cachep->flags);
__kfence_free(objp);
return;
}
- if (unlikely(slab_want_init_on_free(cachep)))
+ /*
+ * As memory initialization might be integrated into KASAN,
+ * kasan_slab_free and initialization memset must be
+ * kept together to avoid discrepancies in behavior.
+ */
+ init = slab_want_init_on_free(cachep);
+ if (init && !kasan_has_integrated_init())
memset(objp, 0, cachep->object_size);
-
- /* Put the object into the quarantine, don't touch it for now. */
- if (kasan_slab_free(cachep, objp))
+ /* KASAN might put objp into memory quarantine, delaying its reuse. */
+ if (kasan_slab_free(cachep, objp, init))
return;
/* Use KCSAN to help debug racy use-after-free. */
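/*
 * The memset above is skipped when kasan_has_integrated_init() is true. A
 * sketch of that helper as it is expected to look in include/linux/kasan.h
 * (not part of this hunk): only hardware tag-based KASAN reports integrated
 * init, because the tagging instructions can initialize memory while setting
 * its tags.
 */
#ifdef CONFIG_KASAN_HW_TAGS
static inline bool kasan_has_integrated_init(void)
{
	return kasan_enabled();
}
#else
static inline bool kasan_has_integrated_init(void)
{
	return false;
}
#endif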
cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_);
- /* Clear memory outside IRQ disabled section */
- if (unlikely(slab_want_init_on_alloc(flags, s)))
- for (i = 0; i < size; i++)
- memset(p[i], 0, s->object_size);
-
- slab_post_alloc_hook(s, objcg, flags, size, p);
+ /*
+ * memcg and kmem_cache debug support and memory initialization.
+ * Done outside of the IRQ disabled section.
+ */
+ slab_post_alloc_hook(s, objcg, flags, size, p,
+ slab_want_init_on_alloc(flags, s));
/* FIXME: Trace call missing. Christoph would like a bulk variant */
return size;
error:
local_irq_enable();
cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
- slab_post_alloc_hook(s, objcg, flags, i, p);
+ slab_post_alloc_hook(s, objcg, flags, i, p, false);
__kmem_cache_free_bulk(s, i, p);
return 0;
}
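/*
 * Illustrative caller, not taken from this patch: after the change above,
 * init_on_alloc zeroing (or KASAN-integrated initialization) for bulk
 * allocations happens inside slab_post_alloc_hook(), so a caller does not
 * clear the objects itself. The cache and array names below are hypothetical.
 */
static int alloc_batch_example(struct kmem_cache *example_cache,
			       void **objs, size_t nr)
{
	/* Returns the full count on success and 0 on failure, as seen above. */
	if (kmem_cache_alloc_bulk(example_cache, GFP_KERNEL, nr, objs) != nr)
		return -ENOMEM;

	/* objs[0..nr-1] are already zeroed when init_on_alloc is in effect. */
	return 0;
}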