1 /* SPDX-License-Identifier: GPL-2.0 */
3 #define TRACE_SYSTEM kmem
5 #if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
8 #include <linux/types.h>
9 #include <linux/tracepoint.h>
10 #include <trace/events/mmflags.h>
/*
 * Trace-event class for slab allocations (shared by the kmalloc and
 * kmem_cache_alloc events below): records the call site, the returned
 * pointer, requested vs. actually allocated size, the gfp flags, and
 * whether the allocation is charged to a memory cgroup.
 *
 * NOTE(review): this excerpt appears to have lost interior macro lines
 * (e.g. the TP_STRUCT__entry(/TP_fast_assign( openers, the __entry->ptr
 * assignment, and closing parentheses) — confirm against the original
 * include/trace/events/kmem.h before relying on it.
 */
12 DECLARE_EVENT_CLASS(kmem_alloc,
14 TP_PROTO(unsigned long call_site,
21 TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags),
24 __field( unsigned long, call_site )
25 __field( const void *, ptr )
26 __field( size_t, bytes_req )
27 __field( size_t, bytes_alloc )
28 __field( unsigned long, gfp_flags )
29 __field( bool, accounted )
33 __entry->call_site = call_site;
35 __entry->bytes_req = bytes_req;
36 __entry->bytes_alloc = bytes_alloc;
/* gfp_t is a sparse "bitwise" type; __force-cast it to a plain integer
 * for storage in the ring buffer */
37 __entry->gfp_flags = (__force unsigned long)gfp_flags;
/* accounted is true only when memcg kmem accounting is compiled in AND
 * either the call requests __GFP_ACCOUNT or the cache has SLAB_ACCOUNT */
38 __entry->accounted = IS_ENABLED(CONFIG_MEMCG_KMEM) ?
39 ((gfp_flags & __GFP_ACCOUNT) ||
40 (s && s->flags & SLAB_ACCOUNT)) : false;
43 TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s accounted=%s",
44 (void *)__entry->call_site,
48 show_gfp_flags(__entry->gfp_flags),
49 __entry->accounted ? "true" : "false")
/* kmalloc() allocation event, instantiated from the kmem_alloc class. */
52 DEFINE_EVENT(kmem_alloc, kmalloc,
54 TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s,
55 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
57 TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags)
/* kmem_cache_alloc() event, instantiated from the kmem_alloc class. */
60 DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,
62 TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s,
63 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
65 TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags)
/*
 * NUMA-aware variant of kmem_alloc: same payload plus the node the
 * allocation was requested on.
 *
 * NOTE(review): the TP_printk format prints node=%d but no
 * __field( int, node ) declaration (nor a __entry->node assignment) is
 * visible in this excerpt — lines appear to have been dropped by the
 * extraction; verify against the original file.
 */
68 DECLARE_EVENT_CLASS(kmem_alloc_node,
70 TP_PROTO(unsigned long call_site,
78 TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node),
81 __field( unsigned long, call_site )
82 __field( const void *, ptr )
83 __field( size_t, bytes_req )
84 __field( size_t, bytes_alloc )
85 __field( unsigned long, gfp_flags )
87 __field( bool, accounted )
91 __entry->call_site = call_site;
93 __entry->bytes_req = bytes_req;
94 __entry->bytes_alloc = bytes_alloc;
/* __force cast strips the sparse bitwise annotation from gfp_t */
95 __entry->gfp_flags = (__force unsigned long)gfp_flags;
/* same memcg accounting test as the kmem_alloc class */
97 __entry->accounted = IS_ENABLED(CONFIG_MEMCG_KMEM) ?
98 ((gfp_flags & __GFP_ACCOUNT) ||
99 (s && s->flags & SLAB_ACCOUNT)) : false;
102 TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s",
103 (void *)__entry->call_site,
106 __entry->bytes_alloc,
107 show_gfp_flags(__entry->gfp_flags),
109 __entry->accounted ? "true" : "false")
/* kmalloc_node() event, instantiated from the kmem_alloc_node class. */
112 DEFINE_EVENT(kmem_alloc_node, kmalloc_node,
114 TP_PROTO(unsigned long call_site, const void *ptr,
115 struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc,
116 gfp_t gfp_flags, int node),
118 TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node)
/* kmem_cache_alloc_node() event, instantiated from kmem_alloc_node. */
121 DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,
123 TP_PROTO(unsigned long call_site, const void *ptr,
124 struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc,
125 gfp_t gfp_flags, int node),
127 TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node)
/*
 * Free-side event body: logs only the call site and the pointer freed.
 * NOTE(review): the TRACE_EVENT(<name>, header line is missing from
 * this excerpt — presumably this is the kfree tracepoint (internal line
 * numbering matches that position in kmem.h); verify before editing.
 */
132 TP_PROTO(unsigned long call_site, const void *ptr),
134 TP_ARGS(call_site, ptr),
137 __field( unsigned long, call_site )
138 __field( const void *, ptr )
142 __entry->call_site = call_site;
146 TP_printk("call_site=%pS ptr=%p",
147 (void *)__entry->call_site, __entry->ptr)
/*
 * kmem_cache_free() event: records the call site, the freed object
 * pointer, and the cache's name. The name is copied into the ring
 * buffer with __string/__assign_str because the cache may be destroyed
 * before the event is read.
 */
150 TRACE_EVENT(kmem_cache_free,
152 TP_PROTO(unsigned long call_site, const void *ptr, const char *name),
154 TP_ARGS(call_site, ptr, name),
157 __field( unsigned long, call_site )
158 __field( const void *, ptr )
159 __string( name, name )
163 __entry->call_site = call_site;
165 __assign_str(name, name);
168 TP_printk("call_site=%pS ptr=%p name=%s",
169 (void *)__entry->call_site, __entry->ptr, __get_str(name))
/*
 * Page free event. The pfn is stored instead of the struct page
 * pointer; TP_printk reconstructs the pointer with pfn_to_page() at
 * read time.
 * NOTE(review): the TP_printk argument list is truncated in this
 * excerpt (the __entry->pfn / __entry->order arguments and closing
 * paren are not visible).
 */
172 TRACE_EVENT(mm_page_free,
174 TP_PROTO(struct page *page, unsigned int order),
176 TP_ARGS(page, order),
179 __field( unsigned long, pfn )
180 __field( unsigned int, order )
184 __entry->pfn = page_to_pfn(page);
185 __entry->order = order;
188 TP_printk("page=%p pfn=0x%lx order=%d",
189 pfn_to_page(__entry->pfn),
/*
 * Batched page free event — order is always 0 (hard-coded in the
 * format string), so only the pfn is recorded.
 * NOTE(review): TP_ARGS( and the tail of the TP_printk argument list
 * are not visible in this excerpt.
 */
194 TRACE_EVENT(mm_page_free_batched,
196 TP_PROTO(struct page *page),
201 __field( unsigned long, pfn )
205 __entry->pfn = page_to_pfn(page);
208 TP_printk("page=%p pfn=0x%lx order=0",
209 pfn_to_page(__entry->pfn),
/*
 * Page allocation event. Unlike the free events, page may be NULL here
 * (allocation failure), so -1UL is stored as a sentinel pfn and the
 * TP_printk guards both the page pointer and pfn output on it.
 */
213 TRACE_EVENT(mm_page_alloc,
215 TP_PROTO(struct page *page, unsigned int order,
216 gfp_t gfp_flags, int migratetype),
218 TP_ARGS(page, order, gfp_flags, migratetype),
221 __field( unsigned long, pfn )
222 __field( unsigned int, order )
223 __field( unsigned long, gfp_flags )
224 __field( int, migratetype )
/* -1UL marks a failed allocation (NULL page) */
228 __entry->pfn = page ? page_to_pfn(page) : -1UL;
229 __entry->order = order;
230 __entry->gfp_flags = (__force unsigned long)gfp_flags;
231 __entry->migratetype = migratetype;
234 TP_printk("page=%p pfn=0x%lx order=%d migratetype=%d gfp_flags=%s",
235 __entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
236 __entry->pfn != -1UL ? __entry->pfn : 0,
238 __entry->migratetype,
239 show_gfp_flags(__entry->gfp_flags))
/*
 * Event class for per-cpu page allocator events: pfn (with the -1UL
 * NULL-page sentinel, guarded in TP_printk), order, migratetype, and
 * whether the allocation came from a per-cpu list refill.
 */
242 DECLARE_EVENT_CLASS(mm_page,
244 TP_PROTO(struct page *page, unsigned int order, int migratetype,
247 TP_ARGS(page, order, migratetype, percpu_refill),
250 __field( unsigned long, pfn )
251 __field( unsigned int, order )
252 __field( int, migratetype )
253 __field( int, percpu_refill )
/* -1UL marks a NULL page, checked before pfn_to_page() below */
257 __entry->pfn = page ? page_to_pfn(page) : -1UL;
258 __entry->order = order;
259 __entry->migratetype = migratetype;
260 __entry->percpu_refill = percpu_refill;
263 TP_printk("page=%p pfn=0x%lx order=%u migratetype=%d percpu_refill=%d",
264 __entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
265 __entry->pfn != -1UL ? __entry->pfn : 0,
267 __entry->migratetype,
268 __entry->percpu_refill)
/* Zone-lock-held allocation path event, using the mm_page class. */
271 DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,
273 TP_PROTO(struct page *page, unsigned int order, int migratetype,
276 TP_ARGS(page, order, migratetype, percpu_refill)
/*
 * Per-cpu page list drain event.
 * NOTE(review): TP_fast_assign stores -1UL when page is NULL, but this
 * TP_printk calls pfn_to_page(__entry->pfn) WITHOUT the -1UL guard that
 * the mm_page class above uses — if a NULL page can reach this event,
 * the printed page pointer is bogus. Confirm whether page is guaranteed
 * non-NULL on the drain path, or add the same guard.
 */
279 TRACE_EVENT(mm_page_pcpu_drain,
281 TP_PROTO(struct page *page, unsigned int order, int migratetype),
283 TP_ARGS(page, order, migratetype),
286 __field( unsigned long, pfn )
287 __field( unsigned int, order )
288 __field( int, migratetype )
292 __entry->pfn = page ? page_to_pfn(page) : -1UL;
293 __entry->order = order;
294 __entry->migratetype = migratetype;
297 TP_printk("page=%p pfn=0x%lx order=%d migratetype=%d",
298 pfn_to_page(__entry->pfn), __entry->pfn,
299 __entry->order, __entry->migratetype)
/*
 * External fragmentation event: fired when an allocation falls back to
 * a different migratetype. change_ownership records whether the whole
 * pageblock now belongs to the allocation's migratetype; the printk's
 * "fragmenting" column is derived at read time from
 * fallback_order < pageblock_order.
 */
302 TRACE_EVENT(mm_page_alloc_extfrag,
304 TP_PROTO(struct page *page,
305 int alloc_order, int fallback_order,
306 int alloc_migratetype, int fallback_migratetype),
309 alloc_order, fallback_order,
310 alloc_migratetype, fallback_migratetype),
313 __field( unsigned long, pfn )
314 __field( int, alloc_order )
315 __field( int, fallback_order )
316 __field( int, alloc_migratetype )
317 __field( int, fallback_migratetype )
318 __field( int, change_ownership )
322 __entry->pfn = page_to_pfn(page);
323 __entry->alloc_order = alloc_order;
324 __entry->fallback_order = fallback_order;
325 __entry->alloc_migratetype = alloc_migratetype;
326 __entry->fallback_migratetype = fallback_migratetype;
/* true when the pageblock's migratetype already matches the request */
327 __entry->change_ownership = (alloc_migratetype ==
328 get_pageblock_migratetype(page));
331 TP_printk("page=%p pfn=0x%lx alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
332 pfn_to_page(__entry->pfn),
334 __entry->alloc_order,
335 __entry->fallback_order,
337 __entry->alloc_migratetype,
338 __entry->fallback_migratetype,
339 __entry->fallback_order < pageblock_order,
340 __entry->change_ownership)
344 * Required for uniquely and securely identifying mm in rss_stat tracepoint.
346 #ifndef __PTR_TO_HASHVAL
/*
 * Hash the mm_struct pointer via ptr_to_hashval() (same hashing %p
 * uses) so the trace exposes a stable ID without leaking the kernel
 * address; truncated to 32 bits for the event field.
 * NOTE(review): the declaration of 'ret', the braces, and the
 * error-handling branch for a failed ptr_to_hashval() are missing from
 * this excerpt — confirm against the original file.
 */
347 static unsigned int __maybe_unused mm_ptr_to_hash(const void *ptr)
350 unsigned long hashval;
352 ret = ptr_to_hashval(ptr, &hashval);
356 /* The hashed value is only 32-bit */
357 return (unsigned int)hashval;
359 #define __PTR_TO_HASHVAL
/*
 * Standard two-pass EM/EMe trick: the first definitions export each
 * enum value to userspace via TRACE_DEFINE_ENUM; the redefinitions
 * build the { value, "name" } pairs consumed by __print_symbolic().
 * NOTE(review): the continuation lines of TRACE_MM_PAGES (the EM(...)
 * list after the backslash) and the intervening #undef lines are not
 * visible in this excerpt. No comment is inserted after the backslash
 * line below, since line-splicing would absorb it into the macro.
 */
362 #define TRACE_MM_PAGES \
371 #define EM(a) TRACE_DEFINE_ENUM(a);
372 #define EMe(a) TRACE_DEFINE_ENUM(a);
379 #define EM(a) { a, #a },
380 #define EMe(a) { a, #a }
/*
 * RSS counter change event: hashed mm ID, whether the mm belongs to
 * the current task, which counter changed, and its new size in bytes
 * (page count shifted by PAGE_SHIFT).
 * NOTE(review): __field declarations for 'member' and 'size' are not
 * visible in this excerpt even though both are assigned below — lines
 * appear to have been dropped by the extraction.
 */
382 TRACE_EVENT(rss_stat,
384 TP_PROTO(struct mm_struct *mm,
388 TP_ARGS(mm, member, count),
391 __field(unsigned int, mm_id)
392 __field(unsigned int, curr)
/* hashed (not raw) pointer — see mm_ptr_to_hash() above */
398 __entry->mm_id = mm_ptr_to_hash(mm);
399 __entry->curr = !!(current->mm == mm);
400 __entry->member = member;
/* convert page count to bytes */
401 __entry->size = (count << PAGE_SHIFT);
404 TP_printk("mm_id=%u curr=%d type=%s size=%ldB",
407 __print_symbolic(__entry->member, TRACE_MM_PAGES),
410 #endif /* _TRACE_KMEM_H */
412 /* This part must be outside protection */
413 #include <trace/define_trace.h>