diff --git a/mm/memory.c b/mm/memory.c
index 5da9640..c8e3576 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2177,11 +2177,11 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
                        unsigned long addr, unsigned long end,
                        unsigned long pfn, pgprot_t prot)
 {
-       pte_t *pte;
+       pte_t *pte, *mapped_pte;
        spinlock_t *ptl;
        int err = 0;
 
-       pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
+       mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
        if (!pte)
                return -ENOMEM;
        arch_enter_lazy_mmu_mode();
@@ -2195,7 +2195,7 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
        arch_leave_lazy_mmu_mode();
-       pte_unmap_unlock(pte - 1, ptl);
+       pte_unmap_unlock(mapped_pte, ptl);
        return err;
 }
 
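The two hunks above fix the unlock path in remap_pte_range(). With CONFIG_HIGHPTE, pte_alloc_map_lock() kmaps the page-table page, and pte_unmap_unlock() must be handed a pointer into that same mapping. Because pte is post-incremented on every iteration, "pte - 1" is only correct when the loop ran to completion; an error path that breaks out before the first increment leaves "pte - 1" pointing in front of the mapped table. Saving the original pointer in mapped_pte makes the unlock independent of where the walk stops. A minimal sketch of the pattern, condensed from the hunks above (some_error_condition() is a hypothetical placeholder for the real per-entry check):

	static int remap_walk_sketch(struct mm_struct *mm, pmd_t *pmd,
				     unsigned long addr, unsigned long end)
	{
		pte_t *pte, *mapped_pte;
		spinlock_t *ptl;
		int err = 0;

		mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
		if (!pte)
			return -ENOMEM;
		do {
			if (some_error_condition(pte)) {	/* hypothetical check */
				err = -EACCES;
				break;	/* can fire before pte++ has ever run */
			}
			/* ... install the entry at *pte ... */
		} while (pte++, addr += PAGE_SIZE, addr != end);
		pte_unmap_unlock(mapped_pte, ptl);	/* the pointer we actually mapped */
		return err;
	}
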
@@ -2394,18 +2394,18 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
                                     pte_fn_t fn, void *data, bool create,
                                     pgtbl_mod_mask *mask)
 {
-       pte_t *pte;
+       pte_t *pte, *mapped_pte;
        int err = 0;
        spinlock_t *ptl;
 
        if (create) {
-               pte = (mm == &init_mm) ?
+               mapped_pte = pte = (mm == &init_mm) ?
                        pte_alloc_kernel_track(pmd, addr, mask) :
                        pte_alloc_map_lock(mm, pmd, addr, &ptl);
                if (!pte)
                        return -ENOMEM;
        } else {
-               pte = (mm == &init_mm) ?
+               mapped_pte = pte = (mm == &init_mm) ?
                        pte_offset_kernel(pmd, addr) :
                        pte_offset_map_lock(mm, pmd, addr, &ptl);
        }
@@ -2428,7 +2428,7 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
        arch_leave_lazy_mmu_mode();
 
        if (mm != &init_mm)
-               pte_unmap_unlock(pte-1, ptl);
+               pte_unmap_unlock(mapped_pte, ptl);
        return err;
 }
 
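The same pattern is applied to apply_to_pte_range(), with one extra wrinkle: for kernel page tables (mm == &init_mm) the PTEs are reached through pte_alloc_kernel_track()/pte_offset_kernel(), which take no kmap and no lock, so only the user-mm branch needs the unlock, and mapped_pte is simply unused on the kernel path. A condensed sketch of the resulting shape (a hedged reconstruction, not the verbatim function):

	pte_t *pte, *mapped_pte;

	mapped_pte = pte = (mm == &init_mm) ?
		pte_offset_kernel(pmd, addr) :
		pte_offset_map_lock(mm, pmd, addr, &ptl);
	/* ... walk the range, pte++ each step, possibly breaking early ... */
	if (mm != &init_mm)
		pte_unmap_unlock(mapped_pte, ptl);	/* init_mm was never mapped/locked */
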
@@ -2902,7 +2902,6 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
                }
                flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
                entry = mk_pte(new_page, vma->vm_page_prot);
-               entry = pte_sw_mkyoung(entry);
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 
                /*
@@ -3560,7 +3559,6 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
        __SetPageUptodate(page);
 
        entry = mk_pte(page, vma->vm_page_prot);
-       entry = pte_sw_mkyoung(entry);
        if (vma->vm_flags & VM_WRITE)
                entry = pte_mkwrite(pte_mkdirty(entry));
 
@@ -3745,8 +3743,6 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
 
        if (prefault && arch_wants_old_prefaulted_pte())
                entry = pte_mkold(entry);
-       else
-               entry = pte_sw_mkyoung(entry);
 
        if (write)
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
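The three hunks above drop the pte_sw_mkyoung() calls from the fault paths. pte_sw_mkyoung() exists for architectures (MIPS, for example) whose hardware does not set the accessed bit in the PTE: it sets the software-managed young bit up front so the first access does not take an extra fault. On every other architecture it is an identity function; the generic fallback in include/linux/pgtable.h of this era looks roughly like this (reproduced from memory, so treat it as a reference sketch):

	#ifndef pte_sw_mkyoung
	static inline pte_t pte_sw_mkyoung(pte_t pte)
	{
		return pte;	/* hardware maintains the accessed bit */
	}
	#define pte_sw_mkyoung	pte_sw_mkyoung
	#endif

With the calls removed, dropping pte_sw_mkyoung() is a no-op on architectures that manage the accessed bit in hardware; only software-managed architectures see a behavioral difference.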
@@ -5177,17 +5173,19 @@ long copy_huge_page_from_user(struct page *dst_page,
        void *page_kaddr;
        unsigned long i, rc = 0;
        unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
+       struct page *subpage = dst_page;
 
-       for (i = 0; i < pages_per_huge_page; i++) {
+       for (i = 0; i < pages_per_huge_page;
+            i++, subpage = mem_map_next(subpage, dst_page, i)) {
                if (allow_pagefault)
-                       page_kaddr = kmap(dst_page + i);
+                       page_kaddr = kmap(subpage);
                else
-                       page_kaddr = kmap_atomic(dst_page + i);
+                       page_kaddr = kmap_atomic(subpage);
                rc = copy_from_user(page_kaddr,
                                (const void __user *)(src + i * PAGE_SIZE),
                                PAGE_SIZE);
                if (allow_pagefault)
-                       kunmap(dst_page + i);
+                       kunmap(subpage);
                else
                        kunmap_atomic(page_kaddr);
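The final (truncated) hunk changes copy_huge_page_from_user() to walk subpages with mem_map_next() instead of plain pointer arithmetic. With classic SPARSEMEM (no VMEMMAP), the struct page array is only guaranteed to be virtually contiguous within a MAX_ORDER-aligned block, so "dst_page + i" can step out of the memmap when a gigantic page spans more than one such block. mem_map_next() recomputes the pointer through pfn_to_page() at each MAX_ORDER boundary and simply increments otherwise; its definition in mm/internal.h at the time is approximately:

	static inline struct page *mem_map_next(struct page *iter,
						struct page *base, int offset)
	{
		if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
			unsigned long pfn = page_to_pfn(base) + offset;

			if (!pfn_valid(pfn))
				return NULL;
			return pfn_to_page(pfn);	/* re-derive across the boundary */
		}
		return iter + 1;	/* contiguous within the block */
	}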