From 04bbfd844b99c7c89a344236d3758e7a2d41572f Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Thu, 24 Aug 2023 15:13:24 +0100
Subject: [PATCH] hugetlb: remove a few calls to page_folio()

Anything found on a linked list threaded through ->lru is guaranteed to
be a folio as the compound_head found in a tail page overlaps the ->lru
member of struct page. So we can pull folios directly off these lists
no matter whether pages or folios were added to the list.

Link: https://lkml.kernel.org/r/20230824141325.2704553-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle)
Reviewed-by: Mike Kravetz
Reviewed-by: Muchun Song
Cc: Sidhartha Kumar
Signed-off-by: Andrew Morton
---
 mm/hugetlb.c | 26 +++++++++++---------------
 1 file changed, 11 insertions(+), 15 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7a6fe4f13777..e91ce966a2c9 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1836,11 +1836,9 @@ static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
 
 static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list)
 {
-	struct page *page, *t_page;
-	struct folio *folio;
+	struct folio *folio, *t_folio;
 
-	list_for_each_entry_safe(page, t_page, list, lru) {
-		folio = page_folio(page);
+	list_for_each_entry_safe(folio, t_folio, list, lru) {
 		update_and_free_hugetlb_folio(h, folio, false);
 		cond_resched();
 	}
@@ -2229,8 +2227,7 @@ static struct page *remove_pool_huge_page(struct hstate *h,
 						 bool acct_surplus)
 {
 	int nr_nodes, node;
-	struct page *page = NULL;
-	struct folio *folio;
+	struct folio *folio = NULL;
 
 	lockdep_assert_held(&hugetlb_lock);
 	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
@@ -2240,15 +2237,14 @@ static struct page *remove_pool_huge_page(struct hstate *h,
 		 */
 		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
 		    !list_empty(&h->hugepage_freelists[node])) {
-			page = list_entry(h->hugepage_freelists[node].next,
-					  struct page, lru);
-			folio = page_folio(page);
+			folio = list_entry(h->hugepage_freelists[node].next,
+					  struct folio, lru);
 			remove_hugetlb_folio(h, folio, acct_surplus);
 			break;
 		}
 	}
 
-	return page;
+	return &folio->page;
 }
 
 /*
@@ -3414,15 +3410,15 @@ static void try_to_free_low(struct hstate *h, unsigned long count,
 	 * Collect pages to be freed on a list, and free after dropping lock
 	 */
 	for_each_node_mask(i, *nodes_allowed) {
-		struct page *page, *next;
+		struct folio *folio, *next;
 		struct list_head *freel = &h->hugepage_freelists[i];
-		list_for_each_entry_safe(page, next, freel, lru) {
+		list_for_each_entry_safe(folio, next, freel, lru) {
 			if (count >= h->nr_huge_pages)
 				goto out;
-			if (PageHighMem(page))
+			if (folio_test_highmem(folio))
 				continue;
-			remove_hugetlb_folio(h, page_folio(page), false);
-			list_add(&page->lru, &page_list);
+			remove_hugetlb_folio(h, folio, false);
+			list_add(&folio->lru, &page_list);
 		}
 	}
 
-- 
2.20.1
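
For readers who want to see the layout argument from the commit message in
isolation, here is a small userspace sketch. The struct page, struct folio,
page_folio() and list_entry() definitions below are simplified stand-ins
invented for illustration, not the kernel's own definitions; the real kernel
enforces the same field overlap with compile-time layout assertions in its mm
headers. The sketch shows that, for a head page sitting on an ->lru list,
list_entry(..., struct folio, lru) lands on the same pointer as the old
page_folio(list_entry(..., struct page, lru)) sequence, which is why the patch
can drop the page_folio() calls.

/*
 * Userspace sketch (not kernel code) of the property the commit relies on:
 * folio->lru overlays page->lru, and a tail page keeps its compound_head
 * pointer (with bit 0 set) in that slot, so a tail page can never be a
 * valid node on an ->lru list.  Everything on such a list is a head page,
 * i.e. a folio.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* Simplified stand-in for struct page. */
struct page {
	unsigned long flags;
	union {
		struct list_head lru;		/* head pages on a list */
		unsigned long compound_head;	/* tail pages: head | 1 */
	};
};

/* Simplified stand-in for struct folio: mirrors struct page's layout. */
struct folio {
	unsigned long flags;
	struct list_head lru;
};

/* The overlap checked at compile time, as the kernel does for its structs. */
_Static_assert(offsetof(struct folio, lru) == offsetof(struct page, lru),
	       "folio->lru must overlay page->lru");

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)

/* Toy page_folio(): a head page is its own folio in this model. */
static struct folio *page_folio(struct page *page)
{
	return (struct folio *)page;
}

int main(void)
{
	struct page head = { .flags = 0 };
	struct list_head list = { &head.lru, &head.lru };

	/* Put the head page on a two-node circular list. */
	head.lru.next = &list;
	head.lru.prev = &list;

	/* Old style: pull a page off the list, then convert to a folio. */
	struct page *page = list_entry(list.next, struct page, lru);
	struct folio *via_page = page_folio(page);

	/* New style: pull the folio straight off the list. */
	struct folio *direct = list_entry(list.next, struct folio, lru);

	assert(via_page == direct);
	printf("same folio either way: %p\n", (void *)direct);
	return 0;
}

Compile with something like "cc -std=c11 sketch.c && ./a.out"; the
compile-time assertion plus the runtime check make the equivalence concrete
under the simplified layout assumed above.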