mm: introduce compound_nr()
author Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 23 Sep 2019 22:34:30 +0000 (15:34 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 24 Sep 2019 22:54:08 +0000 (15:54 -0700)
Replace 1 << compound_order(page) with compound_nr(page).  Minor
improvements in readability.
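
For context, compound_nr() is a one-line wrapper around compound_order(); because compound_order() returns 0 for a non-compound page, compound_nr() returns 1 in that case, so callers do not need to special-case small pages.  A minimal sketch of the helper as added below, together with a typical before/after conversion (modelled on the mm/shmem.c hunk in this patch):

	/* New helper in include/linux/mm.h */
	static inline unsigned long compound_nr(struct page *page)
	{
		return 1UL << compound_order(page);
	}

	/* Before: open-coded shift */
	for (i = 0; i < (1 << compound_order(head)); i++)
		clear_highpage(head + i);

	/* After: same loop bound, spelled as a helper */
	for (i = 0; i < compound_nr(head); i++)
		clear_highpage(head + i);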

Link: http://lkml.kernel.org/r/20190721104612.19120-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
18 files changed:
arch/arm/mm/flush.c
arch/powerpc/mm/hugetlbpage.c
fs/proc/task_mmu.c
include/linux/mm.h
mm/compaction.c
mm/filemap.c
mm/gup.c
mm/hugetlb_cgroup.c
mm/kasan/common.c
mm/memcontrol.c
mm/memory_hotplug.c
mm/migrate.c
mm/page_alloc.c
mm/rmap.c
mm/shmem.c
mm/swap_state.c
mm/util.c
mm/vmscan.c

index 4c7ebe0..6d89db7 100644 (file)
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -208,13 +208,13 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
        } else {
                unsigned long i;
                if (cache_is_vipt_nonaliasing()) {
-                       for (i = 0; i < (1 << compound_order(page)); i++) {
+                       for (i = 0; i < compound_nr(page); i++) {
                                void *addr = kmap_atomic(page + i);
                                __cpuc_flush_dcache_area(addr, PAGE_SIZE);
                                kunmap_atomic(addr);
                        }
                } else {
-                       for (i = 0; i < (1 << compound_order(page)); i++) {
+                       for (i = 0; i < compound_nr(page); i++) {
                                void *addr = kmap_high_get(page + i);
                                if (addr) {
                                        __cpuc_flush_dcache_area(addr, PAGE_SIZE);
index a8953f1..73d4873 100644 (file)
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -667,7 +667,7 @@ void flush_dcache_icache_hugepage(struct page *page)
 
        BUG_ON(!PageCompound(page));
 
-       for (i = 0; i < (1UL << compound_order(page)); i++) {
+       for (i = 0; i < compound_nr(page); i++) {
                if (!PageHighMem(page)) {
                        __flush_dcache_icache(page_address(page+i));
                } else {
index bf43d1d..ea16304 100644 (file)
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -461,7 +461,7 @@ static void smaps_page_accumulate(struct mem_size_stats *mss,
 static void smaps_account(struct mem_size_stats *mss, struct page *page,
                bool compound, bool young, bool dirty, bool locked)
 {
-       int i, nr = compound ? 1 << compound_order(page) : 1;
+       int i, nr = compound ? compound_nr(page) : 1;
        unsigned long size = nr * PAGE_SIZE;
 
        /*
index 9238548..69b7314 100644 (file)
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -805,6 +805,12 @@ static inline void set_compound_order(struct page *page, unsigned int order)
        page[1].compound_order = order;
 }
 
+/* Returns the number of pages in this potentially compound page. */
+static inline unsigned long compound_nr(struct page *page)
+{
+       return 1UL << compound_order(page);
+}
+
 /* Returns the number of bytes in this potentially compound page. */
 static inline unsigned long page_size(struct page *page)
 {
index 952dc2f..777c088 100644 (file)
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -969,7 +969,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                         * is safe to read and it's 0 for tail pages.
                         */
                        if (unlikely(PageCompound(page))) {
-                               low_pfn += (1UL << compound_order(page)) - 1;
+                               low_pfn += compound_nr(page) - 1;
                                goto isolate_fail;
                        }
                }
index 40667c2..5f30aed 100644 (file)
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -126,7 +126,7 @@ static void page_cache_delete(struct address_space *mapping,
        /* hugetlb pages are represented by a single entry in the xarray */
        if (!PageHuge(page)) {
                xas_set_order(&xas, page->index, compound_order(page));
-               nr = 1U << compound_order(page);
+               nr = compound_nr(page);
        }
 
        VM_BUG_ON_PAGE(!PageLocked(page), page);
index 98f13ab..84a36d8 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1460,7 +1460,7 @@ check_again:
                 * gup may start from a tail page. Advance step by the left
                 * part.
                 */
-               step = (1 << compound_order(head)) - (pages[i] - head);
+               step = compound_nr(head) - (pages[i] - head);
                /*
                 * If we get a page from the CMA zone, since we are going to
                 * be pinning these entries, we might as well move them out
index 68c2f2f..f1930fa 100644 (file)
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -139,7 +139,7 @@ static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
        if (!page_hcg || page_hcg != h_cg)
                goto out;
 
-       nr_pages = 1 << compound_order(page);
+       nr_pages = compound_nr(page);
        if (!parent) {
                parent = root_h_cgroup;
                /* root has no limit */
index 307631d..6814d6d 100644 (file)
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -336,7 +336,7 @@ void kasan_poison_slab(struct page *page)
 {
        unsigned long i;
 
-       for (i = 0; i < (1 << compound_order(page)); i++)
+       for (i = 0; i < compound_nr(page); i++)
                page_kasan_tag_reset(page + i);
        kasan_poison_shadow(page_address(page), page_size(page),
                        KASAN_KMALLOC_REDZONE);
index f3c15bb..6c6032c 100644 (file)
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6511,7 +6511,7 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
                unsigned int nr_pages = 1;
 
                if (PageTransHuge(page)) {
-                       nr_pages <<= compound_order(page);
+                       nr_pages = compound_nr(page);
                        ug->nr_huge += nr_pages;
                }
                if (PageAnon(page))
@@ -6523,7 +6523,7 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
                }
                ug->pgpgout++;
        } else {
-               ug->nr_kmem += 1 << compound_order(page);
+               ug->nr_kmem += compound_nr(page);
                __ClearPageKmemcg(page);
        }
 
index c73f099..5f2c83c 100644 (file)
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1309,7 +1309,7 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
                head = compound_head(page);
                if (page_huge_active(head))
                        return pfn;
-               skip = (1 << compound_order(head)) - (page - head);
+               skip = compound_nr(head) - (page - head);
                pfn += skip - 1;
        }
        return 0;
@@ -1347,7 +1347,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 
                if (PageHuge(page)) {
                        struct page *head = compound_head(page);
-                       pfn = page_to_pfn(head) + (1<<compound_order(head)) - 1;
+                       pfn = page_to_pfn(head) + compound_nr(head) - 1;
                        isolate_huge_page(head, &source);
                        continue;
                } else if (PageTransHuge(page))
index 9f4ed4e..aa72b49 100644 (file)
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1892,7 +1892,7 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
        VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
 
        /* Avoid migrating to a node that is nearly full */
-       if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
+       if (!migrate_balanced_pgdat(pgdat, compound_nr(page)))
                return 0;
 
        if (isolate_lru_page(page))
index ff5484f..df566c0 100644 (file)
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -8196,7 +8196,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
                        if (!hugepage_migration_supported(page_hstate(head)))
                                goto unmovable;
 
-                       skip_pages = (1 << compound_order(head)) - (page - head);
+                       skip_pages = compound_nr(head) - (page - head);
                        iter += skip_pages - 1;
                        continue;
                }
index f401732..2600644 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1520,8 +1520,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
                        pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
                        if (PageHuge(page)) {
-                               int nr = 1 << compound_order(page);
-                               hugetlb_count_sub(nr, mm);
+                               hugetlb_count_sub(compound_nr(page), mm);
                                set_huge_swap_pte_at(mm, address,
                                                     pvmw.pte, pteval,
                                                     vma_mmu_pagesize(vma));
index 0f7fd4a..15d26c8 100644 (file)
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -609,7 +609,7 @@ static int shmem_add_to_page_cache(struct page *page,
 {
        XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
        unsigned long i = 0;
-       unsigned long nr = 1UL << compound_order(page);
+       unsigned long nr = compound_nr(page);
 
        VM_BUG_ON_PAGE(PageTail(page), page);
        VM_BUG_ON_PAGE(index != round_down(index, nr), page);
@@ -1884,7 +1884,7 @@ alloc_nohuge:
        lru_cache_add_anon(page);
 
        spin_lock_irq(&info->lock);
-       info->alloced += 1 << compound_order(page);
+       info->alloced += compound_nr(page);
        inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
        shmem_recalc_inode(inode);
        spin_unlock_irq(&info->lock);
@@ -1925,7 +1925,7 @@ clear:
                struct page *head = compound_head(page);
                int i;
 
-               for (i = 0; i < (1 << compound_order(head)); i++) {
+               for (i = 0; i < compound_nr(head); i++) {
                        clear_highpage(head + i);
                        flush_dcache_page(head + i);
                }
@@ -1952,7 +1952,7 @@ clear:
         * Error recovery.
         */
 unacct:
-       shmem_inode_unacct_blocks(inode, 1 << compound_order(page));
+       shmem_inode_unacct_blocks(inode, compound_nr(page));
 
        if (PageTransHuge(page)) {
                unlock_page(page);
index 8368621..f844af5 100644 (file)
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -116,7 +116,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
        struct address_space *address_space = swap_address_space(entry);
        pgoff_t idx = swp_offset(entry);
        XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
-       unsigned long i, nr = 1UL << compound_order(page);
+       unsigned long i, nr = compound_nr(page);
 
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageSwapCache(page), page);
index e6351a8..bab284d 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -521,7 +521,7 @@ bool page_mapped(struct page *page)
                return true;
        if (PageHuge(page))
                return false;
-       for (i = 0; i < (1 << compound_order(page)); i++) {
+       for (i = 0; i < compound_nr(page); i++) {
                if (atomic_read(&page[i]._mapcount) >= 0)
                        return true;
        }
index a6c5d0b..8e03427 100644 (file)
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1149,7 +1149,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 
                VM_BUG_ON_PAGE(PageActive(page), page);
 
-               nr_pages = 1 << compound_order(page);
+               nr_pages = compound_nr(page);
 
                /* Account the number of base pages even though THP */
                sc->nr_scanned += nr_pages;
@@ -1705,7 +1705,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 
                VM_BUG_ON_PAGE(!PageLRU(page), page);
 
-               nr_pages = 1 << compound_order(page);
+               nr_pages = compound_nr(page);
                total_scan += nr_pages;
 
                if (page_zonenum(page) > sc->reclaim_idx) {