mm: cma: add the CMA instance name to cma trace events
author		Minchan Kim <minchan@kernel.org>
		Wed, 5 May 2021 01:37:31 +0000 (18:37 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Wed, 5 May 2021 18:27:24 +0000 (11:27 -0700)
Several cma trace events were missing the CMA instance name.  To
identify each CMA instance, add the name to every cma trace event.
Since cma_alloc_start already exists [1], this patch also renames the
existing cma_alloc event to cma_alloc_finish.

[1] https://lore.kernel.org/linux-mm/20210324160740.15901-1-georgi.djakov@linaro.org
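
For illustration, a minimal caller-side sketch; "cma" and "order" below are
placeholder variables, not taken from this patch, and the call signatures
follow mm/cma.c as of this change.  After this patch, every trace event
fired on this path carries cma->name, so activity from different CMA areas
can be told apart in a single trace:

	/*
	 * Illustrative sketch only.  cma_alloc() starts with cma_alloc_start
	 * [1], ends with cma_alloc_finish and may emit cma_alloc_busy_retry
	 * in between; cma_release() emits cma_release.  All of them now
	 * record cma->name.
	 */
	struct page *page = cma_alloc(cma, 1 << order, order, false);

	if (page)
		cma_release(cma, page, 1 << order);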

Link: https://lkml.kernel.org/r/20210330220237.748899-1-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Cc: Liam Mark <lmark@codeaurora.org>
Cc: Georgi Djakov <georgi.djakov@linaro.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/trace/events/cma.h
mm/cma.c

index be1525a..5cf385a 100644
--- a/include/trace/events/cma.h
+++ b/include/trace/events/cma.h
 
 DECLARE_EVENT_CLASS(cma_alloc_class,
 
-       TP_PROTO(unsigned long pfn, const struct page *page,
+       TP_PROTO(const char *name, unsigned long pfn, const struct page *page,
                 unsigned int count, unsigned int align),
 
-       TP_ARGS(pfn, page, count, align),
+       TP_ARGS(name, pfn, page, count, align),
 
        TP_STRUCT__entry(
+               __string(name, name)
                __field(unsigned long, pfn)
                __field(const struct page *, page)
                __field(unsigned int, count)
@@ -23,13 +24,15 @@ DECLARE_EVENT_CLASS(cma_alloc_class,
        ),
 
        TP_fast_assign(
+               __assign_str(name, name);
                __entry->pfn = pfn;
                __entry->page = page;
                __entry->count = count;
                __entry->align = align;
        ),
 
-       TP_printk("pfn=%lx page=%p count=%u align=%u",
+       TP_printk("name=%s pfn=%lx page=%p count=%u align=%u",
+                 __get_str(name),
                  __entry->pfn,
                  __entry->page,
                  __entry->count,
@@ -38,24 +41,27 @@ DECLARE_EVENT_CLASS(cma_alloc_class,
 
 TRACE_EVENT(cma_release,
 
-       TP_PROTO(unsigned long pfn, const struct page *page,
+       TP_PROTO(const char *name, unsigned long pfn, const struct page *page,
                 unsigned int count),
 
-       TP_ARGS(pfn, page, count),
+       TP_ARGS(name, pfn, page, count),
 
        TP_STRUCT__entry(
+               __string(name, name)
                __field(unsigned long, pfn)
                __field(const struct page *, page)
                __field(unsigned int, count)
        ),
 
        TP_fast_assign(
+               __assign_str(name, name);
                __entry->pfn = pfn;
                __entry->page = page;
                __entry->count = count;
        ),
 
-       TP_printk("pfn=%lx page=%p count=%u",
+       TP_printk("name=%s pfn=%lx page=%p count=%u",
+                 __get_str(name),
                  __entry->pfn,
                  __entry->page,
                  __entry->count)
@@ -85,20 +91,20 @@ TRACE_EVENT(cma_alloc_start,
                  __entry->align)
 );
 
-DEFINE_EVENT(cma_alloc_class, cma_alloc,
+DEFINE_EVENT(cma_alloc_class, cma_alloc_finish,
 
-       TP_PROTO(unsigned long pfn, const struct page *page,
+       TP_PROTO(const char *name, unsigned long pfn, const struct page *page,
                 unsigned int count, unsigned int align),
 
-       TP_ARGS(pfn, page, count, align)
+       TP_ARGS(name, pfn, page, count, align)
 );
 
 DEFINE_EVENT(cma_alloc_class, cma_alloc_busy_retry,
 
-       TP_PROTO(unsigned long pfn, const struct page *page,
+       TP_PROTO(const char *name, unsigned long pfn, const struct page *page,
                 unsigned int count, unsigned int align),
 
-       TP_ARGS(pfn, page, count, align)
+       TP_ARGS(name, pfn, page, count, align)
 );
 
 #endif /* _TRACE_CMA_H */
index 2380f25..cdad8c4 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -486,12 +486,13 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
                pr_debug("%s(): memory range at %p is busy, retrying\n",
                         __func__, pfn_to_page(pfn));
 
-               trace_cma_alloc_busy_retry(pfn, pfn_to_page(pfn), count, align);
+               trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn),
+                                          count, align);
                /* try again with a bit different memory target */
                start = bitmap_no + mask + 1;
        }
 
-       trace_cma_alloc(pfn, page, count, align);
+       trace_cma_alloc_finish(cma->name, pfn, page, count, align);
 
        /*
         * CMA can allocate multiple page blocks, which results in different
@@ -551,7 +552,7 @@ bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
 
        free_contig_range(pfn, count);
        cma_clear_bitmap(cma, pfn, count);
-       trace_cma_release(pfn, pages, count);
+       trace_cma_release(cma->name, pfn, pages, count);
 
        return true;
 }
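
With the patch applied and the cma trace events enabled, the instance name
shows up directly in the trace output.  The lines below are illustrative
only, following the TP_printk() formats above; the pfn, page and count
values are made up, and "reserved" is simply the conventional name of the
default global CMA area:

	cma_alloc_finish: name=reserved pfn=9e000 page=00000000c0ffee00 count=16 align=4
	cma_release: name=reserved pfn=9e000 page=00000000c0ffee00 count=16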