__ClearPageSlabPfmemalloc(page);
__ClearPageSlab(page);
page_mapcount_reset(page);
- page->mapping = NULL;
+ /* In union with page->mapping where page allocator expects NULL */
+ page->slab_cache = NULL;
if (current->reclaim_state)
current->reclaim_state->reclaimed_slab += 1 << order;
unaccount_slab_page(page, order, cachep);
__free_pages(page, order);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map)
{
if (!is_debug_pagealloc_cache(cachep))
return;
- kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
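+ /*
+ * is_debug_pagealloc_cache() has already checked
+ * debug_pagealloc_enabled_static(), so call __kernel_map_pages()
+ * directly instead of repeating that check through the
+ * kernel_map_pages() wrapper.
+ */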
+ __kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
}
#else
static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
int map) {}

#endif

static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
unsigned long caller)
{
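+ /*
+ * Zero the object before it can be put into the KASAN quarantine
+ * below: a quarantined object makes this function return early, so a
+ * memset done after the quarantine check would be skipped and the
+ * stale contents would sit in the quarantine until it drains.
+ */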
+ if (unlikely(slab_want_init_on_free(cachep)))
+ memset(objp, 0, cachep->object_size);
+
/* Put the object into the quarantine, don't touch it for now. */
if (kasan_slab_free(cachep, objp, _RET_IP_))
return;
___cache_free(cachep, objp, caller);
}

void ___cache_free(struct kmem_cache *cachep, void *objp,
unsigned long caller)
{
struct array_cache *ac = cpu_cache_get(cachep);

check_irq_off();
- if (unlikely(slab_want_init_on_free(cachep)))
- memset(objp, 0, cachep->object_size);
kmemleak_free_recursive(objp, cachep->flags);
objp = cache_free_debugcheck(cachep, objp, caller);
memcg_slab_free_hook(cachep, &objp, 1);
EXPORT_SYMBOL(__kmalloc_node_track_caller);
#endif /* CONFIG_NUMA */
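
+ /*
+ * Fill in @kpp with what can be deduced about the object at @object on
+ * @page: the owning cache, the start of the allocation, the offset of
+ * the object data within that allocation and, for SLAB_STORE_USER
+ * caches, the address that allocated it.
+ */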
+void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
+{
+ struct kmem_cache *cachep;
+ unsigned int objnr;
+ void *objp;
+
+ kpp->kp_ptr = object;
+ kpp->kp_page = page;
+ cachep = page->slab_cache;
+ kpp->kp_slab_cache = cachep;
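+ /*
+ * For debug caches, obj_offset() is the gap (red zone and the like)
+ * between the start of the slot and the object data itself; step back
+ * to the slot start before computing the object index.
+ */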
+ objp = object - obj_offset(cachep);
+ kpp->kp_data_offset = obj_offset(cachep);
+ page = virt_to_head_page(objp);
+ objnr = obj_to_index(cachep, page, objp);
+ objp = index_to_obj(cachep, page, objnr);
+ kpp->kp_objp = objp;
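+ /* SLAB_STORE_USER caches record the caller that allocated the object. */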
+ if (DEBUG && cachep->flags & SLAB_STORE_USER)
+ kpp->kp_ret = *dbg_userword(cachep, objp);
+}
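+
+ /*
+ * Example consumer (sketch): a helper along the lines of
+ * kmem_dump_obj() can use the filled-in info to report where a
+ * pointer came from:
+ *
+ *   struct kmem_obj_info kp = { };
+ *
+ *   kmem_obj_info(&kp, object, virt_to_head_page(object));
+ *   pr_info("pointer %px into object %px from cache %s\n",
+ *           kp.kp_ptr, kp.kp_objp, kp.kp_slab_cache->name);
+ */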
+
/**
* __do_kmalloc - allocate memory
* @size: how many bytes of memory are required.