userfaultfd: release page in error path to avoid BUG_ON
[linux-2.6-microblaze.git] mm/userfaultfd.c
index 9a3d451..e14b382 100644
@@ -207,7 +207,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
                                              unsigned long dst_start,
                                              unsigned long src_start,
                                              unsigned long len,
-                                             bool zeropage)
+                                             enum mcopy_atomic_mode mode)
 {
        int vm_alloc_shared = dst_vma->vm_flags & VM_SHARED;
        int vm_shared = dst_vma->vm_flags & VM_SHARED;
@@ -227,7 +227,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
         * by THP.  Since we can not reliably insert a zero page, this
         * feature is not supported.
         */
-       if (zeropage) {
+       if (mode == MCOPY_ATOMIC_ZEROPAGE) {
                mmap_read_unlock(dst_mm);
                return -EINVAL;
        }
@@ -273,8 +273,6 @@ retry:
        }
 
        while (src_addr < src_start + len) {
-               pte_t dst_pteval;
-
                BUG_ON(dst_addr >= dst_start + len);
 
                /*
@@ -290,23 +288,23 @@ retry:
                mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
                err = -ENOMEM;
-               dst_pte = huge_pte_alloc(dst_mm, dst_addr, vma_hpagesize);
+               dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
                if (!dst_pte) {
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                        i_mmap_unlock_read(mapping);
                        goto out_unlock;
                }
 
-               err = -EEXIST;
-               dst_pteval = huge_ptep_get(dst_pte);
-               if (!huge_pte_none(dst_pteval)) {
+               if (mode != MCOPY_ATOMIC_CONTINUE &&
+                   !huge_pte_none(huge_ptep_get(dst_pte))) {
+                       err = -EEXIST;
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                        i_mmap_unlock_read(mapping);
                        goto out_unlock;
                }
 
                err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
-                                               dst_addr, src_addr, &page);
+                                              dst_addr, src_addr, mode, &page);
 
                mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                i_mmap_unlock_read(mapping);
@@ -408,7 +406,7 @@ extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
                                      unsigned long dst_start,
                                      unsigned long src_start,
                                      unsigned long len,
-                                     bool zeropage);
+                                     enum mcopy_atomic_mode mode);
 #endif /* CONFIG_HUGETLB_PAGE */
 
 static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
@@ -458,7 +456,7 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
                                              unsigned long dst_start,
                                              unsigned long src_start,
                                              unsigned long len,
-                                             bool zeropage,
+                                             enum mcopy_atomic_mode mcopy_mode,
                                              bool *mmap_changing,
                                              __u64 mode)
 {
@@ -469,6 +467,7 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
        long copied;
        struct page *page;
        bool wp_copy;
+       bool zeropage = (mcopy_mode == MCOPY_ATOMIC_ZEROPAGE);
 
        /*
         * Sanitize the command parameters:
@@ -527,10 +526,12 @@ retry:
         */
        if (is_vm_hugetlb_page(dst_vma))
                return  __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
-                                               src_start, len, zeropage);
+                                               src_start, len, mcopy_mode);
 
        if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
                goto out_unlock;
+       if (mcopy_mode == MCOPY_ATOMIC_CONTINUE)
+               goto out_unlock;
 
        /*
         * Ensure the dst_vma has a anon_vma or this page
@@ -626,14 +627,22 @@ ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
                     unsigned long src_start, unsigned long len,
                     bool *mmap_changing, __u64 mode)
 {
-       return __mcopy_atomic(dst_mm, dst_start, src_start, len, false,
-                             mmap_changing, mode);
+       return __mcopy_atomic(dst_mm, dst_start, src_start, len,
+                             MCOPY_ATOMIC_NORMAL, mmap_changing, mode);
 }
 
 ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
                       unsigned long len, bool *mmap_changing)
 {
-       return __mcopy_atomic(dst_mm, start, 0, len, true, mmap_changing, 0);
+       return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_ZEROPAGE,
+                             mmap_changing, 0);
+}
+
+ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long start,
+                      unsigned long len, bool *mmap_changing)
+{
+       return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_CONTINUE,
+                             mmap_changing, 0);
 }
 
 int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
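
For reference, the diff replaces the old bool zeropage flag with a three-way enum mcopy_atomic_mode. The enum itself is not shown above; it is introduced in include/linux/userfaultfd_k.h as part of the same change and looks roughly like this (a sketch, comments paraphrased):

	enum mcopy_atomic_mode {
		/* A normal copy_from_user into the destination range. */
		MCOPY_ATOMIC_NORMAL,
		/* Don't copy; map the destination range to the zero page. */
		MCOPY_ATOMIC_ZEROPAGE,
		/* Just install pte(s) with the existing page(s) in the page cache. */
		MCOPY_ATOMIC_CONTINUE,
	};

The new mcopy_continue() entry point is the kernel-side backend for minor-fault handling: rather than copying data or zero-filling, it installs PTEs for pages that already exist in the page cache. In the wider series it is driven by the UFFDIO_CONTINUE ioctl; a minimal, hypothetical userspace helper (uffd, addr and page_size are placeholders, error handling omitted) might look like:

	#include <linux/userfaultfd.h>
	#include <sys/ioctl.h>

	/*
	 * Hypothetical helper: resolve a minor fault at 'addr' by asking the
	 * kernel to install the PTE for the page already in the page cache.
	 */
	static int uffd_continue(int uffd, unsigned long addr, unsigned long page_size)
	{
		struct uffdio_continue cont = {
			.range = {
				.start = addr & ~(page_size - 1),
				.len   = page_size,
			},
			.mode = 0,	/* or UFFDIO_CONTINUE_MODE_DONTWAKE */
		};

		return ioctl(uffd, UFFDIO_CONTINUE, &cont);
	}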