From: Matthew Wilcox (Oracle)
Date: Wed, 2 Apr 2025 18:17:02 +0000 (+0100)
Subject: hugetlb: simplify make_huge_pte()
X-Git-Url: http://git.monstr.eu/?a=commitdiff_plain;h=e06fa168c342c8b937cd72c89582e6af008b204b;p=linux-2.6-microblaze.git

hugetlb: simplify make_huge_pte()

mk_huge_pte() is a bad API.  Despite its name, it creates a normal PTE
which is later transformed into a huge PTE by arch_make_huge_pte().  So
replace the page argument with a folio argument and call folio_mk_pte()
instead.  Then, because we now know this is a regular PTE rather than a
huge one, use pte_mkdirty() instead of huge_pte_mkdirty() (and similar
functions).

Link: https://lkml.kernel.org/r/20250402181709.2386022-9-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle)
Cc: Zi Yan
Cc: Muchun Song
Cc: Alexander Gordeev
Cc: Andreas Larsson
Cc: Anton Ivanov
Cc: Dave Hansen
Cc: David Hildenbrand
Cc: "David S. Miller"
Cc: Geert Uytterhoeven
Cc: Johannes Berg
Cc: Richard Weinberger
Cc:
Signed-off-by: Andrew Morton
---

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7ae38bfb9096..a44d4b0d844c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5472,18 +5472,16 @@ const struct vm_operations_struct hugetlb_vm_ops = {
 	.pagesize = hugetlb_vm_op_pagesize,
 };
 
-static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
+static pte_t make_huge_pte(struct vm_area_struct *vma, struct folio *folio,
 		bool try_mkwrite)
 {
-	pte_t entry;
+	pte_t entry = folio_mk_pte(folio, vma->vm_page_prot);
 	unsigned int shift = huge_page_shift(hstate_vma(vma));
 
 	if (try_mkwrite && (vma->vm_flags & VM_WRITE)) {
-		entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
-					vma->vm_page_prot)));
+		entry = pte_mkwrite_novma(pte_mkdirty(entry));
 	} else {
-		entry = huge_pte_wrprotect(mk_huge_pte(page,
-					vma->vm_page_prot));
+		entry = pte_wrprotect(entry);
 	}
 	entry = pte_mkyoung(entry);
 	entry = arch_make_huge_pte(entry, shift, vma->vm_flags);
@@ -5538,7 +5536,7 @@ static void hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep,
 		  unsigned long addr, struct folio *new_folio, pte_t old,
 		  unsigned long sz)
 {
-	pte_t newpte = make_huge_pte(vma, &new_folio->page, true);
+	pte_t newpte = make_huge_pte(vma, new_folio, true);
 
 	__folio_mark_uptodate(new_folio);
 	hugetlb_add_new_anon_rmap(new_folio, vma, addr);
@@ -6288,7 +6286,7 @@ retry_avoidcopy:
 	spin_lock(vmf->ptl);
 	vmf->pte = hugetlb_walk(vma, vmf->address, huge_page_size(h));
 	if (likely(vmf->pte && pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte))) {
-		pte_t newpte = make_huge_pte(vma, &new_folio->page, !unshare);
+		pte_t newpte = make_huge_pte(vma, new_folio, !unshare);
 
 		/* Break COW or unshare */
 		huge_ptep_clear_flush(vma, vmf->address, vmf->pte);
@@ -6568,7 +6566,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
 		hugetlb_add_new_anon_rmap(folio, vma, vmf->address);
 	else
 		hugetlb_add_file_rmap(folio);
-	new_pte = make_huge_pte(vma, &folio->page, vma->vm_flags & VM_SHARED);
+	new_pte = make_huge_pte(vma, folio, vma->vm_flags & VM_SHARED);
 	/*
 	 * If this pte was previously wr-protected, keep it wr-protected even
 	 * if populated.
@@ -7053,7 +7051,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 	 * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY
 	 * with wp flag set, don't set pte write bit.
 	 */
-	_dst_pte = make_huge_pte(dst_vma, &folio->page,
+	_dst_pte = make_huge_pte(dst_vma, folio,
 			!wp_enabled && !(is_continue && !vm_shared));
 	/*
 	 * Always mark UFFDIO_COPY page dirty; note that this may not be
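
For readers unfamiliar with the helpers involved, the generic definitions
look roughly like the sketch below (paraphrased for illustration, not part
of this patch; the authoritative definitions live in the tree at this
commit).  It shows why mk_huge_pte() is misleading -- it is just mk_pte()
under another name -- and what the folio-based replacement amounts to.

/*
 * Paraphrased sketch, for illustration only.  mk_huge_pte() builds an
 * ordinary PTE; it only becomes a huge PTE once arch_make_huge_pte()
 * transforms it.
 */
static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
{
	return mk_pte(page, pgprot);
}

/* The folio-based equivalent that make_huge_pte() uses after this patch. */
static inline pte_t folio_mk_pte(struct folio *folio, pgprot_t pgprot)
{
	return pfn_pte(folio_pfn(folio), pgprot);
}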