diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index e37bd43..f7b3310 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -41,7 +41,8 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw)
 
                                /* Handle un-addressable ZONE_DEVICE memory */
                                entry = pte_to_swp_entry(*pvmw->pte);
-                               if (!is_device_private_entry(entry))
+                               if (!is_device_private_entry(entry) &&
+                                   !is_device_exclusive_entry(entry))
                                        return false;
                        } else if (!pte_present(*pvmw->pte))
                                return false;
@@ -93,19 +94,21 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
                        return false;
                entry = pte_to_swp_entry(*pvmw->pte);
 
-               if (!is_migration_entry(entry))
+               if (!is_migration_entry(entry) &&
+                   !is_device_exclusive_entry(entry))
                        return false;
 
-               pfn = migration_entry_to_pfn(entry);
+               pfn = swp_offset(entry);
        } else if (is_swap_pte(*pvmw->pte)) {
                swp_entry_t entry;
 
                /* Handle un-addressable ZONE_DEVICE memory */
                entry = pte_to_swp_entry(*pvmw->pte);
-               if (!is_device_private_entry(entry))
+               if (!is_device_private_entry(entry) &&
+                   !is_device_exclusive_entry(entry))
                        return false;
 
-               pfn = device_private_entry_to_pfn(entry);
+               pfn = swp_offset(entry);
        } else {
                if (!pte_present(*pvmw->pte))
                        return false;
@@ -116,6 +119,13 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
        return pfn_is_match(pvmw->page, pfn);
 }
 
+static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
+{
+       pvmw->address = (pvmw->address + size) & ~(size - 1);
+       if (!pvmw->address)
+               pvmw->address = ULONG_MAX;
+}
+
 /**
  * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
  * @pvmw->address
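The step_forward() helper added above rounds the walk address up to the next size-aligned boundary, so one absent upper-level entry skips its whole span in a single step; the ULONG_MAX fixup keeps the walk terminating when the addition wraps past the top of the address space. A standalone sketch of the arithmetic (the TOY_* sizes are the common x86-64 4-level values, assumed here purely for illustration):

#include <assert.h>
#include <limits.h>

#define TOY_PMD_SIZE (1UL << 21)        /* 2 MiB */
#define TOY_PUD_SIZE (1UL << 30)        /* 1 GiB */

static unsigned long toy_step_forward(unsigned long address, unsigned long size)
{
        /* Round up to the next size-aligned boundary... */
        address = (address + size) & ~(size - 1);
        /* ...saturating if the addition wrapped past the top. */
        if (!address)
                address = ULONG_MAX;
        return address;
}

int main(void)
{
        /* An address inside a 2M region jumps to the next 2M boundary. */
        assert(toy_step_forward(0x345678, TOY_PMD_SIZE) == 0x400000);
        /* Skipping an absent 1G entry lands on the next 1G boundary. */
        assert(toy_step_forward(0x40001000, TOY_PUD_SIZE) == 0x80000000);
        /* Near the top of the address space the sum wraps to 0: saturate. */
        assert(toy_step_forward(ULONG_MAX - 0x1000, TOY_PUD_SIZE) == ULONG_MAX);
        return 0;
}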
@@ -144,6 +154,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 {
        struct mm_struct *mm = pvmw->vma->vm_mm;
        struct page *page = pvmw->page;
+       unsigned long end;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
@@ -153,10 +164,11 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
        if (pvmw->pmd && !pvmw->pte)
                return not_found(pvmw);
 
-       if (pvmw->pte)
-               goto next_pte;
+       if (unlikely(PageHuge(page))) {
+               /* The only possible mapping was handled on last iteration */
+               if (pvmw->pte)
+                       return not_found(pvmw);
 
-       if (unlikely(PageHuge(pvmw->page))) {
                /* when pud is not present, pte will be NULL */
                pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
                if (!pvmw->pte)
@@ -168,89 +180,108 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
                        return not_found(pvmw);
                return true;
        }
-restart:
-       pgd = pgd_offset(mm, pvmw->address);
-       if (!pgd_present(*pgd))
-               return false;
-       p4d = p4d_offset(pgd, pvmw->address);
-       if (!p4d_present(*p4d))
-               return false;
-       pud = pud_offset(p4d, pvmw->address);
-       if (!pud_present(*pud))
-               return false;
-       pvmw->pmd = pmd_offset(pud, pvmw->address);
+
        /*
-        * Make sure the pmd value isn't cached in a register by the
-        * compiler and used as a stale value after we've observed a
-        * subsequent update.
+        * Seek to next pte only makes sense for THP.
+        * But more important than that optimization is to filter out
+        * any PageKsm page, whose page->index misleads vma_address()
+        * and vma_address_end() to disaster.
         */
-       pmde = READ_ONCE(*pvmw->pmd);
-       if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
-               pvmw->ptl = pmd_lock(mm, pvmw->pmd);
-               if (likely(pmd_trans_huge(*pvmw->pmd))) {
-                       if (pvmw->flags & PVMW_MIGRATION)
-                               return not_found(pvmw);
-                       if (pmd_page(*pvmw->pmd) != page)
-                               return not_found(pvmw);
-                       return true;
-               } else if (!pmd_present(*pvmw->pmd)) {
-                       if (thp_migration_supported()) {
-                               if (!(pvmw->flags & PVMW_MIGRATION))
+       end = PageTransCompound(page) ?
+               vma_address_end(page, pvmw->vma) :
+               pvmw->address + PAGE_SIZE;
+       if (pvmw->pte)
+               goto next_pte;
+restart:
+       do {
+               pgd = pgd_offset(mm, pvmw->address);
+               if (!pgd_present(*pgd)) {
+                       step_forward(pvmw, PGDIR_SIZE);
+                       continue;
+               }
+               p4d = p4d_offset(pgd, pvmw->address);
+               if (!p4d_present(*p4d)) {
+                       step_forward(pvmw, P4D_SIZE);
+                       continue;
+               }
+               pud = pud_offset(p4d, pvmw->address);
+               if (!pud_present(*pud)) {
+                       step_forward(pvmw, PUD_SIZE);
+                       continue;
+               }
+
+               pvmw->pmd = pmd_offset(pud, pvmw->address);
+               /*
+                * Make sure the pmd value isn't cached in a register by the
+                * compiler and used as a stale value after we've observed a
+                * subsequent update.
+                */
+               pmde = READ_ONCE(*pvmw->pmd);
+
+               if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
+                       pvmw->ptl = pmd_lock(mm, pvmw->pmd);
+                       pmde = *pvmw->pmd;
+                       if (likely(pmd_trans_huge(pmde))) {
+                               if (pvmw->flags & PVMW_MIGRATION)
                                        return not_found(pvmw);
-                               if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
-                                       swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);
+                               if (pmd_page(pmde) != page)
+                                       return not_found(pvmw);
+                               return true;
+                       }
+                       if (!pmd_present(pmde)) {
+                               swp_entry_t entry;
 
-                                       if (migration_entry_to_page(entry) != page)
-                                               return not_found(pvmw);
-                                       return true;
-                               }
+                               if (!thp_migration_supported() ||
+                                   !(pvmw->flags & PVMW_MIGRATION))
+                                       return not_found(pvmw);
+                               entry = pmd_to_swp_entry(pmde);
+                               if (!is_migration_entry(entry) ||
+                                   pfn_swap_entry_to_page(entry) != page)
+                                       return not_found(pvmw);
+                               return true;
                        }
-                       return not_found(pvmw);
-               } else {
                        /* THP pmd was split under us: handle on pte level */
                        spin_unlock(pvmw->ptl);
                        pvmw->ptl = NULL;
-               }
-       } else if (!pmd_present(pmde)) {
-               /*
-                * If PVMW_SYNC, take and drop THP pmd lock so that we
-                * cannot return prematurely, while zap_huge_pmd() has
-                * cleared *pmd but not decremented compound_mapcount().
-                */
-               if ((pvmw->flags & PVMW_SYNC) &&
-                   PageTransCompound(pvmw->page)) {
-                       spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
+               } else if (!pmd_present(pmde)) {
+                       /*
+                        * If PVMW_SYNC, take and drop THP pmd lock so that we
+                        * cannot return prematurely, while zap_huge_pmd() has
+                        * cleared *pmd but not decremented compound_mapcount().
+                        */
+                       if ((pvmw->flags & PVMW_SYNC) &&
+                           PageTransCompound(page)) {
+                               spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
 
-                       spin_unlock(ptl);
+                               spin_unlock(ptl);
+                       }
+                       step_forward(pvmw, PMD_SIZE);
+                       continue;
                }
-               return false;
-       }
-       if (!map_pte(pvmw))
-               goto next_pte;
-       while (1) {
-               unsigned long end;
-
+               if (!map_pte(pvmw))
+                       goto next_pte;
+this_pte:
                if (check_pte(pvmw))
                        return true;
 next_pte:
-               /* Seek to next pte only makes sense for THP */
-               if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
-                       return not_found(pvmw);
-               end = vma_address_end(pvmw->page, pvmw->vma);
                do {
                        pvmw->address += PAGE_SIZE;
                        if (pvmw->address >= end)
                                return not_found(pvmw);
                        /* Did we cross page table boundary? */
-                       if (pvmw->address % PMD_SIZE == 0) {
-                               pte_unmap(pvmw->pte);
+                       if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
                                if (pvmw->ptl) {
                                        spin_unlock(pvmw->ptl);
                                        pvmw->ptl = NULL;
                                }
+                               pte_unmap(pvmw->pte);
+                               pvmw->pte = NULL;
                                goto restart;
-                       } else {
-                               pvmw->pte++;
+                       }
+                       pvmw->pte++;
+                       if ((pvmw->flags & PVMW_SYNC) && !pvmw->ptl) {
+                               pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
+                               spin_lock(pvmw->ptl);
                        }
                } while (pte_none(*pvmw->pte));
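The boundary test in the loop above also changes from pvmw->address % PMD_SIZE == 0 to (pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0: for page-aligned addresses and power-of-two sizes the two are equivalent, and the mask form spells out the "first page of a PMD-sized region" meaning. A quick standalone check of that equivalence (TOY_* sizes again assumed x86-64 values):

#include <assert.h>

#define TOY_PAGE_SIZE (1UL << 12)       /* 4 KiB */
#define TOY_PMD_SIZE  (1UL << 21)       /* 2 MiB */

int main(void)
{
        unsigned long addr;

        /*
         * For every page-aligned address across a few PMD regions, the
         * new mask test agrees with the old modulo test.
         */
        for (addr = 0; addr < 4 * TOY_PMD_SIZE; addr += TOY_PAGE_SIZE) {
                int by_mod  = (addr % TOY_PMD_SIZE) == 0;
                int by_mask = (addr & (TOY_PMD_SIZE - TOY_PAGE_SIZE)) == 0;

                assert(by_mod == by_mask);
        }
        return 0;
}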
 
@@ -258,7 +289,10 @@ next_pte:
                        pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
                        spin_lock(pvmw->ptl);
                }
-       }
+               goto this_pte;
+       } while (pvmw->address < end);
+
+       return false;
 }
 
 /**
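Taken together, the rewrite turns the body of page_vma_mapped_walk() into one bounded do-while: each pass either descends to the pte level or uses step_forward() to skip the span of a missing upper-level entry, and next_pte resumes scanning within the current page table until end. A much-simplified userspace model of that control flow (toy_pmd_present() and the TOY_* sizes are made up for this sketch; the real walk additionally handles huge pmds, KSM pages, and locking):

#include <assert.h>

#define TOY_PAGE_SIZE (1UL << 12)       /* 4 KiB */
#define TOY_PMD_SIZE  (1UL << 21)       /* 2 MiB */

static unsigned long toy_step_forward(unsigned long addr, unsigned long size)
{
        addr = (addr + size) & ~(size - 1);
        return addr ? addr : ~0UL;      /* saturate on wraparound */
}

/* Hypothetical presence test: only the third 2M region has a pmd. */
static int toy_pmd_present(unsigned long addr)
{
        return addr / TOY_PMD_SIZE == 2;
}

int main(void)
{
        unsigned long addr = 0, end = 4 * TOY_PMD_SIZE;
        unsigned long visited = 0;

        do {
                if (!toy_pmd_present(addr)) {
                        /* Absent pmd: skip the whole 2M span it covers. */
                        addr = toy_step_forward(addr, TOY_PMD_SIZE);
                        continue;
                }
                /* Present: scan this region page by page ("pte level"). */
                visited++;
                addr += TOY_PAGE_SIZE;
        } while (addr < end);

        /* Only the 512 pages of the single present region were touched. */
        assert(visited == 512);
        return 0;
}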