hugetlbfs: use i_mmap_rwsem for more pmd sharing synchronization
Author:     Mike Kravetz <mike.kravetz@oracle.com>
AuthorDate: Fri, 28 Dec 2018 08:39:38 +0000 (00:39 -0800)
Commit:     Linus Torvalds <torvalds@linux-foundation.org>
CommitDate: Fri, 28 Dec 2018 20:11:51 +0000 (12:11 -0800)
While looking at BUGs associated with invalid huge page map counts, it was
discovered that a huge pte pointer could become 'invalid' and point to
another task's page table.  Consider the following:

A task takes a page fault on a shared hugetlbfs file and calls
huge_pte_alloc to get a ptep.  Suppose the returned ptep points to a
shared pmd.

Now, another task truncates the hugetlbfs file.  As part of truncation, it
unmaps everyone who has the file mapped.  If the range being truncated is
covered by a shared pmd, huge_pmd_unshare will be called.  For all but the
last user of the shared pmd, huge_pmd_unshare will clear the pud pointing
to the pmd.  If the task in the middle of the page fault is not the last
user, the ptep returned by huge_pte_alloc now points to another task's
page table or worse.  This leads to bad things such as incorrect page
map/reference counts or invalid memory references.
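
A timeline of the race (an abbreviated sketch; only the calls named above
are shown, the truncate side is elided):

    faulting task                          truncating task
    -------------                          ---------------
    huge_pte_alloc()
      returns ptep into a shared pmd
                                           truncation unmaps the range
                                             huge_pmd_unshare()
                                               clears the pud -> shared pmd
    dereferences ptep, which may now
    point into another task's page table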

To fix, expand the use of i_mmap_rwsem as follows:

- i_mmap_rwsem is held in read mode whenever huge_pmd_share is called.
  huge_pmd_share is only called via huge_pte_alloc, so callers of
  huge_pte_alloc take i_mmap_rwsem before calling.  In addition, callers
  of huge_pte_alloc continue to hold the semaphore until finished with the
  ptep.

- i_mmap_rwsem is held in write mode whenever huge_pmd_unshare is
  called.
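
In rough code form (a sketch only; the real call sites are in the diff
below), callers of huge_pte_alloc now follow this pattern:

	mapping = vma->vm_file->f_mapping;
	i_mmap_lock_read(mapping);
	ptep = huge_pte_alloc(mm, addr, sz);
	if (!ptep) {
		i_mmap_unlock_read(mapping);
		return -ENOMEM;		/* or VM_FAULT_OOM in the fault path */
	}
	/* ... use ptep ... */
	i_mmap_unlock_read(mapping);

while unmap paths that may reach huge_pmd_unshare take the semaphore in
write mode:

	i_mmap_lock_write(mapping);
	try_to_unmap(hpage, ttu | TTU_RMAP_LOCKED);
	i_mmap_unlock_write(mapping);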

[mike.kravetz@oracle.com: add explicit check for mapping != null]
Link: http://lkml.kernel.org/r/20181218223557.5202-2-mike.kravetz@oracle.com
Fixes: 39dde65c9940 ("shared page table for hugetlb page")
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: "Aneesh Kumar K . V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Prakash Sangappa <prakash.sangappa@oracle.com>
Cc: Colin Ian King <colin.king@canonical.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/hugetlb.c
mm/memory-failure.c
mm/migrate.c
mm/rmap.c
mm/userfaultfd.c

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 12000ba..87fd3ab 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3238,6 +3238,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
        struct page *ptepage;
        unsigned long addr;
        int cow;
+       struct address_space *mapping = vma->vm_file->f_mapping;
        struct hstate *h = hstate_vma(vma);
        unsigned long sz = huge_page_size(h);
        struct mmu_notifier_range range;
@@ -3249,13 +3250,23 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                mmu_notifier_range_init(&range, src, vma->vm_start,
                                        vma->vm_end);
                mmu_notifier_invalidate_range_start(&range);
+       } else {
+               /*
+                * For shared mappings i_mmap_rwsem must be held to call
+                * huge_pte_alloc, otherwise the returned ptep could go
+                * away if part of a shared pmd and another thread calls
+                * huge_pmd_unshare.
+                */
+               i_mmap_lock_read(mapping);
        }
 
        for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
                spinlock_t *src_ptl, *dst_ptl;
+
                src_pte = huge_pte_offset(src, addr, sz);
                if (!src_pte)
                        continue;
+
                dst_pte = huge_pte_alloc(dst, addr, sz);
                if (!dst_pte) {
                        ret = -ENOMEM;
@@ -3326,6 +3337,8 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 
        if (cow)
                mmu_notifier_invalidate_range_end(&range);
+       else
+               i_mmap_unlock_read(mapping);
 
        return ret;
 }
@@ -3771,14 +3784,18 @@ retry:
                        };
 
                        /*
-                        * hugetlb_fault_mutex must be dropped before
-                        * handling userfault.  Reacquire after handling
-                        * fault to make calling code simpler.
+                        * hugetlb_fault_mutex and i_mmap_rwsem must be
+                        * dropped before handling userfault.  Reacquire
+                        * after handling fault to make calling code simpler.
                         */
                        hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping,
                                                        idx, haddr);
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+                       i_mmap_unlock_read(mapping);
+
                        ret = handle_userfault(&vmf, VM_UFFD_MISSING);
+
+                       i_mmap_lock_read(mapping);
                        mutex_lock(&hugetlb_fault_mutex_table[hash]);
                        goto out;
                }
@@ -3926,6 +3943,11 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
        ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
        if (ptep) {
+               /*
+                * Since we hold no locks, ptep could be stale.  That is
+                * OK as we are only making decisions based on content and
+                * not actually modifying content here.
+                */
                entry = huge_ptep_get(ptep);
                if (unlikely(is_hugetlb_entry_migration(entry))) {
                        migration_entry_wait_huge(vma, mm, ptep);
@@ -3933,20 +3955,31 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
                        return VM_FAULT_HWPOISON_LARGE |
                                VM_FAULT_SET_HINDEX(hstate_index(h));
-       } else {
-               ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
-               if (!ptep)
-                       return VM_FAULT_OOM;
        }
 
+       /*
+        * Acquire i_mmap_rwsem before calling huge_pte_alloc and hold
+        * until finished with ptep.  This prevents huge_pmd_unshare from
+        * being called elsewhere and making the ptep no longer valid.
+        *
+        * ptep could have already been assigned via huge_pte_offset.  That
+        * is OK, as huge_pte_alloc will return the same value unless
+        * something changed.
+        */
        mapping = vma->vm_file->f_mapping;
-       idx = vma_hugecache_offset(h, vma, haddr);
+       i_mmap_lock_read(mapping);
+       ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
+       if (!ptep) {
+               i_mmap_unlock_read(mapping);
+               return VM_FAULT_OOM;
+       }
 
        /*
         * Serialize hugepage allocation and instantiation, so that we don't
         * get spurious allocation failures if two CPUs race to instantiate
         * the same page in the page cache.
         */
+       idx = vma_hugecache_offset(h, vma, haddr);
        hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr);
        mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
@@ -4034,6 +4067,7 @@ out_ptl:
        }
 out_mutex:
        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+       i_mmap_unlock_read(mapping);
        /*
         * Generally it's safe to hold refcount during waiting page lock. But
         * here we just wait to defer the next page fault to avoid busy loop and
@@ -4638,10 +4672,12 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
  * and returns the corresponding pte. While this is not necessary for the
  * !shared pmd case because we can allocate the pmd later as well, it makes the
- * code much cleaner. pmd allocation is essential for the shared case because
- * pud has to be populated inside the same i_mmap_rwsem section - otherwise
- * racing tasks could either miss the sharing (see huge_pte_offset) or select a
- * bad pmd for sharing.
+ * code much cleaner.
+ *
+ * This routine must be called with i_mmap_rwsem held in at least read mode.
+ * For hugetlbfs, this prevents removal of any page table entries associated
+ * with the address space.  This is important as we are setting up sharing
+ * based on existing page table entries (mappings).
  */
 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 {
@@ -4658,7 +4694,6 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
        if (!vma_shareable(vma, addr))
                return (pte_t *)pmd_alloc(mm, pud, addr);
 
-       i_mmap_lock_write(mapping);
        vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
                if (svma == vma)
                        continue;
@@ -4688,7 +4723,6 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
        spin_unlock(ptl);
 out:
        pte = (pte_t *)pmd_alloc(mm, pud, addr);
-       i_mmap_unlock_write(mapping);
        return pte;
 }
 
@@ -4699,7 +4733,7 @@ out:
  * indicated by page_count > 1, unmap is achieved by clearing pud and
  * decrementing the ref count. If count == 1, the pte page is not shared.
  *
- * called with page table lock held.
+ * Called with page table lock held and i_mmap_rwsem held in write mode.
  *
  * returns: 1 successfully unmapped a shared pte page
  *         0 the underlying pte page is not shared, or it is the last user
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 7c72f2a..6379fff 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -966,7 +966,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
        enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
        struct address_space *mapping;
        LIST_HEAD(tokill);
-       bool unmap_success;
+       bool unmap_success = true;
        int kill = 1, forcekill;
        struct page *hpage = *hpagep;
        bool mlocked = PageMlocked(hpage);
@@ -1028,7 +1028,19 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
        if (kill)
                collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
 
-       unmap_success = try_to_unmap(hpage, ttu);
+       if (!PageHuge(hpage)) {
+               unmap_success = try_to_unmap(hpage, ttu);
+       } else if (mapping) {
+               /*
+                * For hugetlb pages, try_to_unmap could potentially call
+                * huge_pmd_unshare.  Because of this, take semaphore in
+                * write mode here and set TTU_RMAP_LOCKED to indicate we
+                * have taken the lock at this higher level.
+                */
+               i_mmap_lock_write(mapping);
+               unmap_success = try_to_unmap(hpage, ttu|TTU_RMAP_LOCKED);
+               i_mmap_unlock_write(mapping);
+       }
        if (!unmap_success)
                pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
                       pfn, page_mapcount(hpage));
diff --git a/mm/migrate.c b/mm/migrate.c
index 4389696..5d1839a 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1324,8 +1324,19 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
                goto put_anon;
 
        if (page_mapped(hpage)) {
+               struct address_space *mapping = page_mapping(hpage);
+
+               /*
+                * try_to_unmap could potentially call huge_pmd_unshare.
+                * Because of this, take semaphore in write mode here and
+                * set TTU_RMAP_LOCKED to let lower levels know we have
+                * taken the lock.
+                */
+               i_mmap_lock_write(mapping);
                try_to_unmap(hpage,
-                       TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+                       TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS|
+                       TTU_RMAP_LOCKED);
+               i_mmap_unlock_write(mapping);
                page_was_mapped = 1;
        }
 
diff --git a/mm/rmap.c b/mm/rmap.c
index 68a1a5b..21a26cf 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -25,6 +25,7 @@
  *     page->flags PG_locked (lock_page)
  *       hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
  *         mapping->i_mmap_rwsem
+ *           hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
  *           anon_vma->rwsem
  *             mm->page_table_lock or pte_lock
  *               zone_lru_lock (in mark_page_accessed, isolate_lru_page)
@@ -1378,6 +1379,9 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                /*
                 * If sharing is possible, start and end will be adjusted
                 * accordingly.
+                *
+                * If called for a huge page, caller must hold i_mmap_rwsem
+                * in write mode as it is possible to call huge_pmd_unshare.
                 */
                adjust_range_if_pmd_sharing_possible(vma, &range.start,
                                                     &range.end);
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 458acda..4836858 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -267,10 +267,14 @@ retry:
                VM_BUG_ON(dst_addr & ~huge_page_mask(h));
 
                /*
-                * Serialize via hugetlb_fault_mutex
+                * Serialize via i_mmap_rwsem and hugetlb_fault_mutex.
+                * i_mmap_rwsem ensures the dst_pte remains valid even
+                * in the case of shared pmds.  fault mutex prevents
+                * races with other faulting threads.
                 */
-               idx = linear_page_index(dst_vma, dst_addr);
                mapping = dst_vma->vm_file->f_mapping;
+               i_mmap_lock_read(mapping);
+               idx = linear_page_index(dst_vma, dst_addr);
                hash = hugetlb_fault_mutex_hash(h, dst_mm, dst_vma, mapping,
                                                                idx, dst_addr);
                mutex_lock(&hugetlb_fault_mutex_table[hash]);
@@ -279,6 +283,7 @@ retry:
                dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
                if (!dst_pte) {
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+                       i_mmap_unlock_read(mapping);
                        goto out_unlock;
                }
 
@@ -286,6 +291,7 @@ retry:
                dst_pteval = huge_ptep_get(dst_pte);
                if (!huge_pte_none(dst_pteval)) {
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+                       i_mmap_unlock_read(mapping);
                        goto out_unlock;
                }
 
@@ -293,6 +299,7 @@ retry:
                                                dst_addr, src_addr, &page);
 
                mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+               i_mmap_unlock_read(mapping);
                vm_alloc_shared = vm_shared;
 
                cond_resched();