mm: page_vma_mapped_walk(): crossing page table boundary
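The functional change named in the title is in the next_pte loop: after pvmw->address is advanced by PAGE_SIZE, the walk now tests (pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0 rather than pvmw->address % PMD_SIZE == 0, drops pvmw->ptl before (not after) pte_unmap(), and clears pvmw->pte so the restart begins from a clean state in the next page table. The two boundary tests agree for page-aligned addresses; as a quick illustration (a standalone userspace sketch, not kernel code, assuming the common 4KiB PAGE_SIZE and 2MiB PMD_SIZE of default x86_64), the loop below asserts their equivalence:

/*
 * Standalone illustration only (not kernel code): for page-aligned
 * addresses, the new mask test picks out exactly the same addresses
 * as the old modulo test.  PAGE_SIZE/PMD_SIZE values are assumed
 * (4KiB pages, 2MiB PMDs, as on default x86_64).
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE	0x1000UL
#define PMD_SIZE	0x200000UL

int main(void)
{
	unsigned long addr;

	for (addr = 0; addr < 4 * PMD_SIZE; addr += PAGE_SIZE) {
		int old_test = (addr % PMD_SIZE) == 0;
		int new_test = (addr & (PMD_SIZE - PAGE_SIZE)) == 0;

		assert(old_test == new_test);
	}
	printf("old and new PMD-boundary tests agree for page-aligned addresses\n");
	return 0;
}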
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 86e3a36..9c87b30 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -134,7 +134,7 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
  * regardless of which page table level the page is mapped at. @pvmw->pmd is
  * NULL.
  *
- * Retruns false if there are no more page table entries for the page in
+ * Returns false if there are no more page table entries for the page in
  * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
  *
  * If you need to stop the walk before page_vma_mapped_walk() returned false,
@@ -153,10 +153,11 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
        if (pvmw->pmd && !pvmw->pte)
                return not_found(pvmw);
 
-       if (pvmw->pte)
-               goto next_pte;
+       if (unlikely(PageHuge(page))) {
+               /* The only possible mapping was handled on last iteration */
+               if (pvmw->pte)
+                       return not_found(pvmw);
 
-       if (unlikely(PageHuge(pvmw->page))) {
                /* when pud is not present, pte will be NULL */
                pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
                if (!pvmw->pte)
@@ -168,6 +169,9 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
                        return not_found(pvmw);
                return true;
        }
+
+       if (pvmw->pte)
+               goto next_pte;
 restart:
        pgd = pgd_offset(mm, pvmw->address);
        if (!pgd_present(*pgd))
@@ -187,60 +191,69 @@ restart:
        pmde = READ_ONCE(*pvmw->pmd);
        if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
                pvmw->ptl = pmd_lock(mm, pvmw->pmd);
-               if (likely(pmd_trans_huge(*pvmw->pmd))) {
+               pmde = *pvmw->pmd;
+               if (likely(pmd_trans_huge(pmde))) {
                        if (pvmw->flags & PVMW_MIGRATION)
                                return not_found(pvmw);
-                       if (pmd_page(*pvmw->pmd) != page)
+                       if (pmd_page(pmde) != page)
                                return not_found(pvmw);
                        return true;
-               } else if (!pmd_present(*pvmw->pmd)) {
-                       if (thp_migration_supported()) {
-                               if (!(pvmw->flags & PVMW_MIGRATION))
-                                       return not_found(pvmw);
-                               if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
-                                       swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);
+               }
+               if (!pmd_present(pmde)) {
+                       swp_entry_t entry;
 
-                                       if (migration_entry_to_page(entry) != page)
-                                               return not_found(pvmw);
-                                       return true;
-                               }
-                       }
-                       return not_found(pvmw);
-               } else {
-                       /* THP pmd was split under us: handle on pte level */
-                       spin_unlock(pvmw->ptl);
-                       pvmw->ptl = NULL;
+                       if (!thp_migration_supported() ||
+                           !(pvmw->flags & PVMW_MIGRATION))
+                               return not_found(pvmw);
+                       entry = pmd_to_swp_entry(pmde);
+                       if (!is_migration_entry(entry) ||
+                           migration_entry_to_page(entry) != page)
+                               return not_found(pvmw);
+                       return true;
                }
+               /* THP pmd was split under us: handle on pte level */
+               spin_unlock(pvmw->ptl);
+               pvmw->ptl = NULL;
        } else if (!pmd_present(pmde)) {
+               /*
+                * If PVMW_SYNC, take and drop THP pmd lock so that we
+                * cannot return prematurely, while zap_huge_pmd() has
+                * cleared *pmd but not decremented compound_mapcount().
+                */
+               if ((pvmw->flags & PVMW_SYNC) && PageTransCompound(page)) {
+                       spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
+
+                       spin_unlock(ptl);
+               }
                return false;
        }
        if (!map_pte(pvmw))
                goto next_pte;
        while (1) {
+               unsigned long end;
+
                if (check_pte(pvmw))
                        return true;
 next_pte:
                /* Seek to next pte only makes sense for THP */
-               if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
+               if (!PageTransHuge(page))
                        return not_found(pvmw);
+               end = vma_address_end(page, pvmw->vma);
                do {
                        pvmw->address += PAGE_SIZE;
-                       if (pvmw->address >= pvmw->vma->vm_end ||
-                           pvmw->address >=
-                                       __vma_address(pvmw->page, pvmw->vma) +
-                                       thp_size(pvmw->page))
+                       if (pvmw->address >= end)
                                return not_found(pvmw);
                        /* Did we cross page table boundary? */
-                       if (pvmw->address % PMD_SIZE == 0) {
-                               pte_unmap(pvmw->pte);
+                       if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
                                if (pvmw->ptl) {
                                        spin_unlock(pvmw->ptl);
                                        pvmw->ptl = NULL;
                                }
+                               pte_unmap(pvmw->pte);
+                               pvmw->pte = NULL;
                                goto restart;
-                       } else {
-                               pvmw->pte++;
                        }
+                       pvmw->pte++;
                } while (pte_none(*pvmw->pte));
 
                if (!pvmw->ptl) {
@@ -266,14 +279,10 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
                .vma = vma,
                .flags = PVMW_SYNC,
        };
-       unsigned long start, end;
-
-       start = __vma_address(page, vma);
-       end = start + thp_size(page) - PAGE_SIZE;
 
-       if (unlikely(end < vma->vm_start || start >= vma->vm_end))
+       pvmw.address = vma_address(page, vma);
+       if (pvmw.address == -EFAULT)
                return 0;
-       pvmw.address = max(start, vma->vm_start);
        if (!page_vma_mapped_walk(&pvmw))
                return 0;
        page_vma_mapped_walk_done(&pvmw);
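
For reference, this is the loop contract the hunks above preserve, shown as a caller sketch modeled on the existing rmap walkers (the function name, the mapcount accumulator and the early-exit limit are hypothetical, not part of this patch): a true return from page_vma_mapped_walk() leaves pvmw.pte (or pvmw.pmd for a PMD-mapped THP) set with pvmw.ptl held, a false return means all entries for the page in this vma have been visited, and a caller that stops early must call page_vma_mapped_walk_done() itself.

/*
 * Caller sketch only, not part of this patch.  Assumes the usual
 * rmap_one-style context (<linux/rmap.h>, <linux/mm.h>); my_walk_one,
 * the mapcount accumulator and the early-exit limit are hypothetical.
 */
static bool my_walk_one(struct page *page, struct vm_area_struct *vma,
			unsigned long address, void *arg)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	int *mapcount = arg;

	while (page_vma_mapped_walk(&pvmw)) {
		/* Here pvmw.pte or pvmw.pmd is valid and pvmw.ptl is held. */
		(*mapcount)++;
		if (*mapcount >= 64) {
			/* Stopping before the walk finished: drop pte map and ptl. */
			page_vma_mapped_walk_done(&pvmw);
			return false;	/* abort the surrounding rmap walk */
		}
	}
	/* A false return from the walk already unmapped the pte and dropped ptl. */
	return true;
}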