diff --git a/mm/memory.c b/mm/memory.c
index ddf20bd..89325f9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -571,8 +571,8 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
  * PFNMAP mappings in order to support COWable mappings.
  *
  */
-struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
-                            pte_t pte, bool with_public_device)
+struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+                           pte_t pte)
 {
        unsigned long pfn = pte_pfn(pte);
 
@@ -585,29 +585,6 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
                        return NULL;
                if (is_zero_pfn(pfn))
                        return NULL;
-
-               /*
-                * Device public pages are special pages (they are ZONE_DEVICE
-                * pages but different from persistent memory). They behave
-                * allmost like normal pages. The difference is that they are
-                * not on the lru and thus should never be involve with any-
-                * thing that involve lru manipulation (mlock, numa balancing,
-                * ...).
-                *
-                * This is why we still want to return NULL for such page from
-                * vm_normal_page() so that we do not have to special case all
-                * call site of vm_normal_page().
-                */
-               if (likely(pfn <= highest_memmap_pfn)) {
-                       struct page *page = pfn_to_page(pfn);
-
-                       if (is_device_public_page(page)) {
-                               if (with_public_device)
-                                       return page;
-                               return NULL;
-                       }
-               }
-
                if (pte_devmap(pte))
                        return NULL;
 
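
The hunk above drops MEMORY_DEVICE_PUBLIC support, so vm_normal_page() loses
its with_public_device flag and returns to a simple contract: hand back the
struct page behind an ordinary mapping, or NULL for anything special (zero
page, raw PFN mappings, devmap pages). A minimal caller sketch under the
post-patch API; touch_mapped_page() is a hypothetical helper, not kernel code:

    static void touch_mapped_page(struct vm_area_struct *vma,
                                  unsigned long addr, pte_t pte)
    {
            struct page *page = vm_normal_page(vma, addr, pte);

            if (!page)
                    return;             /* zero page, PFN map, or devmap */
            mark_page_accessed(page);   /* page is a normal, LRU-managed page */
    }
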
@@ -797,17 +774,6 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                rss[mm_counter(page)]++;
        } else if (pte_devmap(pte)) {
                page = pte_page(pte);
-
-               /*
-                * Cache coherent device memory behave like regular page and
-                * not like persistent memory page. For more informations see
-                * MEMORY_DEVICE_CACHE_COHERENT in memory_hotplug.h
-                */
-               if (is_device_public_page(page)) {
-                       get_page(page);
-                       page_dup_rmap(page, false);
-                       rss[mm_counter(page)]++;
-               }
        }
 
 out_set_pte:
@@ -1063,7 +1029,7 @@ again:
                if (pte_present(ptent)) {
                        struct page *page;
 
-                       page = _vm_normal_page(vma, addr, ptent, true);
+                       page = vm_normal_page(vma, addr, ptent);
                        if (unlikely(details) && page) {
                                /*
                                 * unmap_shared_mapping_pages() wants to
@@ -1475,8 +1441,6 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
        set_pte_at(mm, addr, pte, mk_pte(page, prot));
 
        retval = 0;
-       pte_unmap_unlock(pte, ptl);
-       return retval;
 out_unlock:
        pte_unmap_unlock(pte, ptl);
 out:
@@ -1547,7 +1511,7 @@ static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
        int ret, i;
 
        /* Fail if the user requested offset is beyond the end of the object */
-       if (offset > num)
+       if (offset >= num)
                return -ENXIO;
 
        /* Fail if the user requested size exceeds available object size */
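
This one-character change tightens the bounds check in __vm_map_pages():
offset is a zero-based page index into an array of num pages, so valid values
are 0..num-1 and offset == num already points one past the end. With num = 4,
for instance, offset 4 used to slip past the old "offset > num" test and was
only caught by the size check below; it now fails up front with -ENXIO. A
hedged sketch of a driver relying on the check through vm_map_pages(), where
demo_mmap(), buf_pages and buf_npages are assumed names:

    static struct page **buf_pages;     /* assumed driver state */
    static unsigned long buf_npages;

    /* vm_map_pages() feeds vma->vm_pgoff in as the offset, so an mmap()
     * with a page offset >= buf_npages now fails cleanly with -ENXIO. */
    static int demo_mmap(struct file *file, struct vm_area_struct *vma)
    {
            return vm_map_pages(vma, buf_pages, buf_npages);
    }
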
@@ -2038,7 +2002,6 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
 {
        pte_t *pte;
        int err;
-       pgtable_t token;
        spinlock_t *uninitialized_var(ptl);
 
        pte = (mm == &init_mm) ?
@@ -2051,10 +2014,8 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
 
        arch_enter_lazy_mmu_mode();
 
-       token = pmd_pgtable(*pmd);
-
        do {
-               err = fn(pte++, token, addr, data);
+               err = fn(pte++, addr, data);
                if (err)
                        break;
        } while (addr += PAGE_SIZE, addr != end);
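
With the unused pgtable_t argument gone, every pte_fn_t callback shrinks to
(pte, addr, data). A minimal sketch of a callback and its invocation under
the assumed post-patch prototype; clear_pte_cb() is a made-up example, not an
in-tree user:

    static int clear_pte_cb(pte_t *pte, unsigned long addr, void *data)
    {
            pte_clear(&init_mm, addr, pte); /* example per-PTE action */
            return 0;                       /* non-zero aborts the walk */
    }

    /* usage:
     *      err = apply_to_page_range(&init_mm, start, size,
     *                                clear_pte_cb, NULL);
     */
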
@@ -2782,13 +2743,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
                        migration_entry_wait(vma->vm_mm, vmf->pmd,
                                             vmf->address);
                } else if (is_device_private_entry(entry)) {
-                       /*
-                        * For un-addressable device memory we call the pgmap
-                        * fault handler callback. The callback must migrate
-                        * the page back to some CPU accessible page.
-                        */
-                       ret = device_private_entry_fault(vma, vmf->address, entry,
-                                                vmf->flags, vmf->pmd);
+                       vmf->page = device_private_entry_to_page(entry);
+                       ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
                } else if (is_hwpoison_entry(entry)) {
                        ret = VM_FAULT_HWPOISON;
                } else {
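
Instead of a dedicated device_private_entry_fault() hook, the fault on an
un-addressable device-private page is now dispatched through the page's own
pgmap ops: the driver-supplied migrate_to_ram() callback must move the data
back to a CPU-accessible page before the fault can be retried. A hedged
sketch of the driver side, with the demo_ names as placeholders:

    static vm_fault_t demo_migrate_to_ram(struct vm_fault *vmf)
    {
            /* driver-specific: migrate vmf->page back to system RAM,
             * then let the fault retry against the new page */
            return 0;                   /* or VM_FAULT_SIGBUS on failure */
    }

    static const struct dev_pagemap_ops demo_pagemap_ops = {
            .migrate_to_ram = demo_migrate_to_ram,
    };
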
@@ -2807,7 +2763,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
                struct swap_info_struct *si = swp_swap_info(entry);
 
                if (si->flags & SWP_SYNCHRONOUS_IO &&
-                               __swap_count(si, entry) == 1) {
+                               __swap_count(entry) == 1) {
                        /* skip swapcache */
                        page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
                                                        vmf->address);
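
__swap_count() now takes only the swp_entry_t and resolves the
swap_info_struct internally, so the si looked up above is needed here just
for the SWP_SYNCHRONOUS_IO flag test. A rough sketch of the helper's new
shape (not the exact kernel body, which additionally guards against a racing
swapoff):

    int __swap_count(swp_entry_t entry)
    {
            struct swap_info_struct *si = swp_swap_info(entry);

            return swap_count(si->swap_map[swp_offset(entry)]);
    }
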
@@ -4349,7 +4305,9 @@ int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
        void *old_buf = buf;
        int write = gup_flags & FOLL_WRITE;
 
-       down_read(&mm->mmap_sem);
+       if (down_read_killable(&mm->mmap_sem))
+               return 0;
+
        /* ignore errors, just check how much was successfully transferred */
        while (len) {
                int bytes, ret, offset;
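
Taking mmap_sem with down_read_killable() lets a task stuck behind a writer
be killed instead of sleeping uninterruptibly; when the wait is interrupted,
__access_remote_vm() simply reports that zero bytes were transferred. The
same pattern in isolation, as a minimal sketch (read_locked_op() is
hypothetical):

    static int read_locked_op(struct mm_struct *mm)
    {
            if (down_read_killable(&mm->mmap_sem))
                    return -EINTR;      /* fatal signal while waiting */
            /* ... work under the read side of mmap_sem ... */
            up_read(&mm->mmap_sem);
            return 0;
    }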