mm: consolidate common checks in hugetlb_get_unmapped_area
author    Oscar Salvador <osalvador@suse.de>
          Mon, 7 Oct 2024 07:50:37 +0000 (09:50 +0200)
committer Andrew Morton <akpm@linux-foundation.org>
          Thu, 7 Nov 2024 04:11:10 +0000 (20:11 -0800)
prepare_hugepage_range() performs almost the same checks for all
architectures that define it, with the exception of mips and loongarch,
which also check for overflows.

The remaining checks only verify that addr and len are properly aligned,
so move them into hugetlb_get_unmapped_area() and get rid of a fair
amount of duplicated code.
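
As an illustration only (not part of the patch itself), here is a minimal
sketch of the consolidated flow: the generic code now rejects a misaligned
len and, for MAP_FIXED, a misaligned addr, before calling
prepare_hugepage_range() for whatever arch-specific validation remains
(e.g. the overflow checks on mips and loongarch).  The helper name
check_hugetlb_fixed_mapping() below is hypothetical; the actual patch
open-codes these checks in hugetlb_get_unmapped_area(), as the
fs/hugetlbfs/inode.c hunk shows.

    #include <linux/hugetlb.h>
    #include <linux/mman.h>

    /* Illustrative sketch, not the exact kernel code. */
    static int check_hugetlb_fixed_mapping(struct file *file, unsigned long addr,
                                           unsigned long len, unsigned long flags)
    {
            struct hstate *h = hstate_file(file);

            /* len must be a multiple of the huge page size for any mapping. */
            if (len & ~huge_page_mask(h))
                    return -EINVAL;

            if (flags & MAP_FIXED) {
                    /* A fixed address must be huge-page aligned as well. */
                    if (addr & ~huge_page_mask(h))
                            return -EINVAL;
                    /* Arch-specific checks only (e.g. overflow on mips/loongarch). */
                    if (prepare_hugepage_range(file, addr, len))
                            return -EINVAL;
            }
            return 0;
    }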

[akpm@linux-foundation.org: remove now-unused local]
Link: https://lore.kernel.org/oe-kbuild-all/202410081210.uNLbf3Jk-lkp@intel.com/
Link: https://lkml.kernel.org/r/20241007075037.267650-10-osalvador@suse.de
Signed-off-by: Oscar Salvador <osalvador@suse.de>
Cc: David Hildenbrand <david@redhat.com>
Cc: Donet Tom <donettom@linux.ibm.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Peter Xu <peterx@redhat.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/loongarch/include/asm/hugetlb.h
arch/mips/include/asm/hugetlb.h
arch/parisc/include/asm/hugetlb.h
arch/s390/include/asm/hugetlb.h
arch/sh/include/asm/hugetlb.h
fs/hugetlbfs/inode.c
include/asm-generic/hugetlb.h

arch/loongarch/include/asm/hugetlb.h
index 5da32c0..b837c65 100644
@@ -16,12 +16,7 @@ static inline int prepare_hugepage_range(struct file *file,
                                         unsigned long len)
 {
        unsigned long task_size = STACK_TOP;
-       struct hstate *h = hstate_file(file);
 
-       if (len & ~huge_page_mask(h))
-               return -EINVAL;
-       if (addr & ~huge_page_mask(h))
-               return -EINVAL;
        if (len > task_size)
                return -ENOMEM;
        if (task_size - len < addr)
arch/mips/include/asm/hugetlb.h
index fd69c88..d0a86ce 100644
@@ -17,12 +17,7 @@ static inline int prepare_hugepage_range(struct file *file,
                                         unsigned long len)
 {
        unsigned long task_size = STACK_TOP;
-       struct hstate *h = hstate_file(file);
 
-       if (len & ~huge_page_mask(h))
-               return -EINVAL;
-       if (addr & ~huge_page_mask(h))
-               return -EINVAL;
        if (len > task_size)
                return -ENOMEM;
        if (task_size - len < addr)
arch/parisc/include/asm/hugetlb.h
index 72daacc..5b3a542 100644
@@ -12,21 +12,6 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep);
 
-/*
- * If the arch doesn't supply something else, assume that hugepage
- * size aligned regions are ok without further preparation.
- */
-#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
-static inline int prepare_hugepage_range(struct file *file,
-                       unsigned long addr, unsigned long len)
-{
-       if (len & ~HPAGE_MASK)
-               return -EINVAL;
-       if (addr & ~HPAGE_MASK)
-               return -EINVAL;
-       return 0;
-}
-
 #define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
 static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
                                          unsigned long addr, pte_t *ptep)
arch/s390/include/asm/hugetlb.h
index 37e80a3..6f815d4 100644
@@ -25,23 +25,6 @@ extern pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep
 extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                              unsigned long addr, pte_t *ptep);
 
-/*
- * If the arch doesn't supply something else, assume that hugepage
- * size aligned regions are ok without further preparation.
- */
-#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
-static inline int prepare_hugepage_range(struct file *file,
-                       unsigned long addr, unsigned long len)
-{
-       struct hstate *h = hstate_file(file);
-
-       if (len & ~huge_page_mask(h))
-               return -EINVAL;
-       if (addr & ~huge_page_mask(h))
-               return -EINVAL;
-       return 0;
-}
-
 static inline void arch_clear_hugetlb_flags(struct folio *folio)
 {
        clear_bit(PG_arch_1, &folio->flags);
arch/sh/include/asm/hugetlb.h
index 75028bd..4a92e6e 100644
@@ -5,21 +5,6 @@
 #include <asm/cacheflush.h>
 #include <asm/page.h>
 
-/*
- * If the arch doesn't supply something else, assume that hugepage
- * size aligned regions are ok without further preparation.
- */
-#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
-static inline int prepare_hugepage_range(struct file *file,
-                       unsigned long addr, unsigned long len)
-{
-       if (len & ~HPAGE_MASK)
-               return -EINVAL;
-       if (addr & ~HPAGE_MASK)
-               return -EINVAL;
-       return 0;
-}
-
 #define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
 static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
                                          unsigned long addr, pte_t *ptep)
fs/hugetlbfs/inode.c
index 935c0ed..c6191a6 100644
@@ -181,8 +181,12 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 
        if (len & ~huge_page_mask(h))
                return -EINVAL;
-       if ((flags & MAP_FIXED) && prepare_hugepage_range(file, addr, len))
-               return -EINVAL;
+       if (flags & MAP_FIXED) {
+               if (addr & ~huge_page_mask(h))
+                       return -EINVAL;
+               if (prepare_hugepage_range(file, addr, len))
+                       return -EINVAL;
+       }
        if (addr)
                addr0 = ALIGN(addr, huge_page_size(h));
 
include/asm-generic/hugetlb.h
index 67bbdaf..f42133d 100644
@@ -123,13 +123,6 @@ static inline int huge_pte_none_mostly(pte_t pte)
 static inline int prepare_hugepage_range(struct file *file,
                unsigned long addr, unsigned long len)
 {
-       struct hstate *h = hstate_file(file);
-
-       if (len & ~huge_page_mask(h))
-               return -EINVAL;
-       if (addr & ~huge_page_mask(h))
-               return -EINVAL;
-
        return 0;
 }
 #endif