diff --git a/mm/msync.c b/mm/msync.c
index 3b5f1c5..3563a56 100644
--- a/mm/msync.c
+++ b/mm/msync.c
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
-/*
- * Called with mm->page_table_lock held to protect against other
- * threads/the swapper from ripping pte's out from under us.
- */
-
 static void msync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                                unsigned long addr, unsigned long end)
 {
        pte_t *pte;
+       spinlock_t *ptl;
        int progress = 0;
 
 again:
-       pte = pte_offset_map(pmd, addr);
+       pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        do {
-               unsigned long pfn;
                struct page *page;
 
                if (progress >= 64) {
                        progress = 0;
-                       if (need_resched() ||
-                           need_lockbreak(&vma->vm_mm->page_table_lock))
+                       if (need_resched() || need_lockbreak(ptl))
                                break;
                }
                progress++;
@@ -45,20 +39,16 @@ again:
                        continue;
                if (!pte_maybe_dirty(*pte))
                        continue;
-               pfn = pte_pfn(*pte);
-               if (!pfn_valid(pfn))
+               page = vm_normal_page(vma, addr, *pte);
+               if (!page)
                        continue;
-               page = pfn_to_page(pfn);
-               if (PageReserved(page))
-                       continue;
-
                if (ptep_clear_flush_dirty(vma, addr, pte) ||
                    page_test_and_clear_dirty(page))
                        set_page_dirty(page);
                progress += 3;
        } while (pte++, addr += PAGE_SIZE, addr != end);
-       pte_unmap(pte - 1);
-       cond_resched_lock(&vma->vm_mm->page_table_lock);
+       pte_unmap_unlock(pte - 1, ptl);
+       cond_resched();
        if (addr != end)
                goto again;
 }
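
The hunk above drops the caller-held mm->page_table_lock in favour of the per-page-table lock returned by pte_offset_map_lock(), and replaces the pfn_valid()/PageReserved() checks with vm_normal_page(). A minimal sketch of the resulting locking pattern, assuming the kernel-internal API of this era; walk_pte_range_locked() is a hypothetical helper name, not code from this patch:

#include <linux/mm.h>
#include <linux/highmem.h>

/* Sketch only: the pte_offset_map_lock()/pte_unmap_unlock() pairing used
 * above.  walk_pte_range_locked() is a hypothetical helper, not part of
 * this patch. */
static void walk_pte_range_locked(struct vm_area_struct *vma, pmd_t *pmd,
                                  unsigned long addr, unsigned long end)
{
        spinlock_t *ptl;
        pte_t *pte;

        /* Map the page table and take its own lock in one call. */
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        do {
                if (!pte_present(*pte))
                        continue;
                /* ... per-pte work, e.g. the dirty-bit handling above ... */
        } while (pte++, addr += PAGE_SIZE, addr != end);
        /* Unlock and unmap; pte has advanced one past the last entry. */
        pte_unmap_unlock(pte - 1, ptl);
}

Because the lock now covers only a single page table rather than the whole mm, the walk can simply cond_resched() between tables instead of using cond_resched_lock() on mm->page_table_lock, as the hunk shows.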
@@ -96,27 +86,25 @@ static inline void msync_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
 static void msync_page_range(struct vm_area_struct *vma,
                                unsigned long addr, unsigned long end)
 {
-       struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgd;
        unsigned long next;
 
        /* For hugepages we can't go walking the page table normally,
         * but that's ok, hugetlbfs is memory based, so we don't need
-        * to do anything more on an msync() */
-       if (is_vm_hugetlb_page(vma))
+        * to do anything more on an msync().
+        */
+       if (vma->vm_flags & VM_HUGETLB)
                return;
 
        BUG_ON(addr >= end);
-       pgd = pgd_offset(mm, addr);
+       pgd = pgd_offset(vma->vm_mm, addr);
        flush_cache_range(vma, addr, end);
-       spin_lock(&mm->page_table_lock);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                msync_pud_range(vma, pgd, addr, next);
        } while (pgd++, addr = next, addr != end);
-       spin_unlock(&mm->page_table_lock);
 }
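
The open-coded VM_HUGETLB test above matches what is_vm_hugetlb_page() expands to when hugetlbfs is configured, so the substitution does not change behaviour. Roughly (an approximation of the include/linux/hugetlb.h helper of this era, shown for reference only, not part of the patch):

static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
{
        return vma->vm_flags & VM_HUGETLB;
}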
 
 /*
@@ -149,7 +137,7 @@ static int msync_interval(struct vm_area_struct *vma,
                        ret = filemap_fdatawrite(mapping);
                        if (file->f_op && file->f_op->fsync) {
                                /*
-                                * We don't take i_sem here because mmap_sem
+                                * We don't take i_mutex here because mmap_sem
                                 * is already held.
                                 */
                                err = file->f_op->fsync(file,file->f_dentry,1);
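
For context, a minimal userspace illustration of the path this file implements (the filename and length below are arbitrary, not taken from the patch): storing through a MAP_SHARED mapping dirties ptes, and msync(MS_SYNC) propagates that dirtiness via msync_page_range() and then writes the file out through filemap_fdatawrite() and ->fsync(), as in msync_interval() above.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        char *map;
        int fd = open("msync-demo.dat", O_RDWR | O_CREAT, 0644);

        if (fd < 0 || ftruncate(fd, 4096) < 0)
                return 1;
        map = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (map == MAP_FAILED)
                return 1;

        memcpy(map, "hello", 5);                /* dirties the pte */
        if (msync(map, 4096, MS_SYNC) < 0)      /* walks the range, writes back */
                perror("msync");

        munmap(map, 4096);
        close(fd);
        return 0;
}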