mm: kmem: rename (__)memcg_kmem_(un)charge_memcg() to __memcg_kmem_(un)charge()
author Roman Gushchin <guro@fb.com>
Thu, 2 Apr 2020 04:06:56 +0000 (21:06 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 2 Apr 2020 16:35:28 +0000 (09:35 -0700)
Drop the _memcg suffix from the (__)memcg_kmem_(un)charge_memcg() functions.
The shorter names are more obvious.

These are the most basic functions: they simply (un)charge the given memory
cgroup by the given number of pages.

Also fix up the corresponding comments.
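
For illustration only (not part of this patch), a caller of the renamed
wrappers might look roughly like the sketch below; my_buf_alloc() and its
error handling are hypothetical:

	#include <linux/gfp.h>
	#include <linux/memcontrol.h>

	/*
	 * Hypothetical helper (illustration only): charge 1 << order pages
	 * to the given memcg before allocating them, and return the charge
	 * if the allocation itself fails.
	 */
	static struct page *my_buf_alloc(struct mem_cgroup *memcg, gfp_t gfp,
					 int order)
	{
		struct page *page;

		if (memcg_kmem_charge(memcg, gfp, 1 << order))
			return NULL;	/* over the kmem/memory limit */

		page = alloc_pages(gfp, order);
		if (!page) {
			/* Allocation failed: give the charge back. */
			memcg_kmem_uncharge(memcg, 1 << order);
		}
		return page;
	}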

Signed-off-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Link: http://lkml.kernel.org/r/20200109202659.752357-7-guro@fb.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/memcontrol.h
mm/memcontrol.c
mm/slab.h

index b819a64..1b4150f 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1367,12 +1367,11 @@ struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
 void memcg_kmem_put_cache(struct kmem_cache *cachep);
 
 #ifdef CONFIG_MEMCG_KMEM
+int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
+                       unsigned int nr_pages);
+void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages);
 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
 void __memcg_kmem_uncharge_page(struct page *page, int order);
-int __memcg_kmem_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp,
-                             unsigned int nr_pages);
-void __memcg_kmem_uncharge_memcg(struct mem_cgroup *memcg,
-                                unsigned int nr_pages);
 
 extern struct static_key_false memcg_kmem_enabled_key;
 extern struct workqueue_struct *memcg_kmem_cache_wq;
@@ -1408,19 +1407,19 @@ static inline void memcg_kmem_uncharge_page(struct page *page, int order)
                __memcg_kmem_uncharge_page(page, order);
 }
 
-static inline int memcg_kmem_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp,
-                                         unsigned int nr_pages)
+static inline int memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
+                                   unsigned int nr_pages)
 {
        if (memcg_kmem_enabled())
-               return __memcg_kmem_charge_memcg(memcg, gfp, nr_pages);
+               return __memcg_kmem_charge(memcg, gfp, nr_pages);
        return 0;
 }
 
-static inline void memcg_kmem_uncharge_memcg(struct mem_cgroup *memcg,
-                                            unsigned int nr_pages)
+static inline void memcg_kmem_uncharge(struct mem_cgroup *memcg,
+                                      unsigned int nr_pages)
 {
        if (memcg_kmem_enabled())
-               __memcg_kmem_uncharge_memcg(memcg, nr_pages);
+               __memcg_kmem_uncharge(memcg, nr_pages);
 }
 
 /*
index a008669..e6043ab 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2881,15 +2881,15 @@ void memcg_kmem_put_cache(struct kmem_cache *cachep)
 }
 
 /**
- * __memcg_kmem_charge_memcg: charge a kmem page
+ * __memcg_kmem_charge: charge a number of kernel pages to a memcg
  * @memcg: memory cgroup to charge
  * @gfp: reclaim mode
  * @nr_pages: number of pages to charge
  *
  * Returns 0 on success, an error code on failure.
  */
-int __memcg_kmem_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp,
-                             unsigned int nr_pages)
+int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
+                       unsigned int nr_pages)
 {
        struct page_counter *counter;
        int ret;
@@ -2916,6 +2916,21 @@ int __memcg_kmem_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp,
        return 0;
 }
 
+/**
+ * __memcg_kmem_uncharge: uncharge a number of kernel pages from a memcg
+ * @memcg: memcg to uncharge
+ * @nr_pages: number of pages to uncharge
+ */
+void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages)
+{
+       if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
+               page_counter_uncharge(&memcg->kmem, nr_pages);
+
+       page_counter_uncharge(&memcg->memory, nr_pages);
+       if (do_memsw_account())
+               page_counter_uncharge(&memcg->memsw, nr_pages);
+}
+
 /**
  * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
  * @page: page to charge
@@ -2934,7 +2949,7 @@ int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
 
        memcg = get_mem_cgroup_from_current();
        if (!mem_cgroup_is_root(memcg)) {
-               ret = __memcg_kmem_charge_memcg(memcg, gfp, 1 << order);
+               ret = __memcg_kmem_charge(memcg, gfp, 1 << order);
                if (!ret) {
                        page->mem_cgroup = memcg;
                        __SetPageKmemcg(page);
@@ -2944,21 +2959,6 @@ int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
        return ret;
 }
 
-/**
- * __memcg_kmem_uncharge_memcg: uncharge a kmem page
- * @memcg: memcg to uncharge
- * @nr_pages: number of pages to uncharge
- */
-void __memcg_kmem_uncharge_memcg(struct mem_cgroup *memcg,
-                                unsigned int nr_pages)
-{
-       if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
-               page_counter_uncharge(&memcg->kmem, nr_pages);
-
-       page_counter_uncharge(&memcg->memory, nr_pages);
-       if (do_memsw_account())
-               page_counter_uncharge(&memcg->memsw, nr_pages);
-}
 /**
  * __memcg_kmem_uncharge_page: uncharge a kmem page
  * @page: page to uncharge
@@ -2973,7 +2973,7 @@ void __memcg_kmem_uncharge_page(struct page *page, int order)
                return;
 
        VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
-       __memcg_kmem_uncharge_memcg(memcg, nr_pages);
+       __memcg_kmem_uncharge(memcg, nr_pages);
        page->mem_cgroup = NULL;
 
        /* slab pages do not have PageKmemcg flag set */
index 43f8ce4..207c83e 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -366,7 +366,7 @@ static __always_inline int memcg_charge_slab(struct page *page,
                return 0;
        }
 
-       ret = memcg_kmem_charge_memcg(memcg, gfp, nr_pages);
+       ret = memcg_kmem_charge(memcg, gfp, nr_pages);
        if (ret)
                goto out;
 
@@ -397,7 +397,7 @@ static __always_inline void memcg_uncharge_slab(struct page *page, int order,
        if (likely(!mem_cgroup_is_root(memcg))) {
                lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page));
                mod_lruvec_state(lruvec, cache_vmstat_idx(s), -nr_pages);
-               memcg_kmem_uncharge_memcg(memcg, nr_pages);
+               memcg_kmem_uncharge(memcg, nr_pages);
        } else {
                mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
                                    -nr_pages);