mm: memcg: add THP swap out info for anonymous reclaim
author Xin Hao <vernhao@tencent.com>
Wed, 13 Sep 2023 16:49:37 +0000 (00:49 +0800)
committer Andrew Morton <akpm@linux-foundation.org>
Wed, 4 Oct 2023 17:32:27 +0000 (10:32 -0700)
At present we support a per-memcg reclaim strategy, but we do not know
how many transparent huge pages are being reclaimed.  Transparent huge
pages have to be split before they can be reclaimed, and that splitting
can become a performance bottleneck.  For example, when two memcgs (A
and B) are reclaiming anonymous pages at the same time and memcg 'A' is
reclaiming a large number of transparent huge pages, we can then tell
that the bottleneck is caused by memcg 'A'.  Therefore, to make such
problems easier to analyze, add THP swap out counters to the per-memcg
statistics.
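
The counters are bumped through the existing per-folio memcg event
helper.  For orientation only, a rough sketch of how such a helper is
structured (modelled on count_memcg_folio_events() in
include/linux/memcontrol.h; not a verbatim copy):

	static inline void count_memcg_folio_events(struct folio *folio,
			enum vm_event_item idx, unsigned long nr)
	{
		/* resolve the memcg that owns this folio, if any */
		struct mem_cgroup *memcg = folio_memcg(folio);

		if (memcg)
			count_memcg_events(memcg, idx, nr);
	}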

[akpm@linux-foundation.org: fix swap_writepage_fs(), per Johannes]
Link: https://lkml.kernel.org/r/20230913213343.GB48476@cmpxchg.org
Link: https://lkml.kernel.org/r/20230913164938.16918-1-vernhao@tencent.com
Signed-off-by: Xin Hao <vernhao@tencent.com>
Suggested-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Documentation/admin-guide/cgroup-v2.rst
mm/memcontrol.c
mm/page_io.c
mm/vmscan.c

index b26b527..622a7f2 100644 (file)
@@ -1532,6 +1532,15 @@ PAGE_SIZE multiple when read back.
                collapsing an existing range of pages. This counter is not
                present when CONFIG_TRANSPARENT_HUGEPAGE is not set.
 
+         thp_swpout (npn)
+               Number of transparent hugepages which were swapped out in
+               one piece without splitting.
+
+         thp_swpout_fallback (npn)
+               Number of transparent hugepages which were split before
+               swapout, usually because contiguous swap space could not be
+               allocated for the huge page.
+
   memory.numa_stat
        A read-only nested-keyed file which exists on non-root cgroups.
 
index 5b009b2..6831333 100644 (file)
@@ -704,6 +704,8 @@ static const unsigned int memcg_vm_event_stat[] = {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
        THP_FAULT_ALLOC,
        THP_COLLAPSE_ALLOC,
+       THP_SWPOUT,
+       THP_SWPOUT_FALLBACK,
 #endif
 };
 
index fe4c21a..cb559ae 100644 (file)
@@ -208,8 +208,10 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
 static inline void count_swpout_vm_event(struct folio *folio)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-       if (unlikely(folio_test_pmd_mappable(folio)))
+       if (unlikely(folio_test_pmd_mappable(folio))) {
+               count_memcg_folio_events(folio, THP_SWPOUT, 1);
                count_vm_event(THP_SWPOUT);
+       }
 #endif
        count_vm_events(PSWPOUT, folio_nr_pages(folio));
 }
@@ -278,9 +280,6 @@ static void sio_write_complete(struct kiocb *iocb, long ret)
                        set_page_dirty(page);
                        ClearPageReclaim(page);
                }
-       } else {
-               for (p = 0; p < sio->pages; p++)
-                       count_swpout_vm_event(page_folio(sio->bvec[p].bv_page));
        }
 
        for (p = 0; p < sio->pages; p++)
@@ -296,6 +295,7 @@ static void swap_writepage_fs(struct page *page, struct writeback_control *wbc)
        struct file *swap_file = sis->swap_file;
        loff_t pos = page_file_offset(page);
 
+       count_swpout_vm_event(page_folio(page));
        set_page_writeback(page);
        unlock_page(page);
        if (wbc->swap_plug)
index 8a3f83e..acf1154 100644 (file)
@@ -1214,6 +1214,7 @@ retry:
                                                                folio_list))
                                                goto activate_locked;
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+                                       count_memcg_folio_events(folio, THP_SWPOUT_FALLBACK, 1);
                                        count_vm_event(THP_SWPOUT_FALLBACK);
 #endif
                                        if (!add_to_swap(folio))
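
Both counters surface in memory.stat alongside the existing per-memcg
events (see the cgroup-v2.rst hunk above), so any tool that already
parses that file can pick them up.  A minimal, hypothetical userspace
sketch; the cgroup path is illustrative:

	/* Hypothetical example: dump the new per-memcg THP swapout
	 * counters from a cgroup's memory.stat.  The cgroup path below
	 * is illustrative only. */
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		FILE *f = fopen("/sys/fs/cgroup/A/memory.stat", "r");
		char key[64];
		unsigned long long val;

		if (!f)
			return 1;
		/* memory.stat is a flat keyed file: "<name> <value>" per line */
		while (fscanf(f, "%63s %llu", key, &val) == 2) {
			if (!strcmp(key, "thp_swpout") ||
			    !strcmp(key, "thp_swpout_fallback"))
				printf("%s %llu\n", key, val);
		}
		fclose(f);
		return 0;
	}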