userfaultfd: switch from mmap_sem to the mmap locking API

diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index e39fdec..52de290 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -234,7 +234,7 @@ static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
        pte_t *ptep, pte;
        bool ret = true;
 
-       VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
+       mmap_assert_locked(mm);
 
        ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
 
@@ -286,7 +286,7 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
        pte_t *pte;
        bool ret = true;
 
-       VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
+       mmap_assert_locked(mm);
 
        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))
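For context, the replacement calls are thin wrappers around the very same rwsem, which this conversion renames from mmap_sem to mmap_lock. A sketch consistent with include/linux/mmap_lock.h as introduced by the mmap locking API series (paraphrased here, not quoted verbatim):

	static inline void mmap_write_lock(struct mm_struct *mm)
	{
		down_write(&mm->mmap_lock);	/* same rwsem, new field name */
	}

	static inline void mmap_write_unlock(struct mm_struct *mm)
	{
		up_write(&mm->mmap_lock);
	}

	static inline void mmap_read_unlock(struct mm_struct *mm)
	{
		up_read(&mm->mmap_lock);
	}

	static inline void mmap_assert_locked(struct mm_struct *mm)
	{
		lockdep_assert_held(&mm->mmap_lock);
		VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
	}

Note that mmap_assert_locked() is stricter than the open-coded rwsem_is_locked() checks it replaces: it also asserts the lock under lockdep.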
@@ -369,13 +369,13 @@ static inline bool userfaultfd_signal_pending(unsigned int flags)
  * FAULT_FLAG_KILLABLE are not straightforward. The "Caution"
  * recommendation in __lock_page_or_retry is not an understatement.
  *
- * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_sem must be released
+ * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_lock must be released
  * before returning VM_FAULT_RETRY only if FAULT_FLAG_RETRY_NOWAIT is
  * not set.
  *
  * If FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_KILLABLE is not
  * set, VM_FAULT_RETRY can still be returned if and only if there are
- * fatal_signal_pending()s, and the mmap_sem must be released before
+ * fatal_signal_pending()s, and the mmap_lock must be released before
  * returning it.
  */
 vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
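The contract in the comment above reduces to: a handler that returns VM_FAULT_RETRY must release mmap_lock itself, unless the caller asked for FAULT_FLAG_RETRY_NOWAIT. A hypothetical skeleton, purely for illustration (example_must_wait() and the SIGBUS fallback are invented for the sketch):

	static vm_fault_t example_fault(struct vm_fault *vmf)
	{
		if (example_must_wait(vmf)) {			/* invented predicate */
			if (!(vmf->flags & FAULT_FLAG_ALLOW_RETRY))
				return VM_FAULT_SIGBUS;		/* retry not permitted */
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
				mmap_read_unlock(vmf->vma->vm_mm); /* per the rule above */
			return VM_FAULT_RETRY;
		}
		return 0;	/* fault served, mmap_lock still held */
	}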
@@ -396,16 +396,16 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
         * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with
         * the no_page_table() helper in follow_page_mask(), but the
         * shmem_vm_ops->fault method is invoked even during
-        * coredumping without mmap_sem and it ends up here.
+        * coredumping without mmap_lock and it ends up here.
         */
        if (current->flags & (PF_EXITING|PF_DUMPCORE))
                goto out;
 
        /*
-        * Coredumping runs without mmap_sem so we can only check that
-        * the mmap_sem is held, if PF_DUMPCORE was not set.
+        * Coredumping runs without mmap_lock so we can only check that
+        * the mmap_lock is held, if PF_DUMPCORE was not set.
         */
-       WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem));
+       mmap_assert_locked(mm);
 
        ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
        if (!ctx)
@@ -422,7 +422,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
        /*
         * If it's already released don't get it. This avoids to loop
         * in __get_user_pages if userfaultfd_release waits on the
-        * caller of handle_userfault to release the mmap_sem.
+        * caller of handle_userfault to release the mmap_lock.
         */
        if (unlikely(READ_ONCE(ctx->released))) {
                /*
@@ -481,7 +481,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
        if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
                goto out;
 
-       /* take the reference before dropping the mmap_sem */
+       /* take the reference before dropping the mmap_lock */
        userfaultfd_ctx_get(ctx);
 
        init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
@@ -514,7 +514,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
                must_wait = userfaultfd_huge_must_wait(ctx, vmf->vma,
                                                       vmf->address,
                                                       vmf->flags, reason);
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        if (likely(must_wait && !READ_ONCE(ctx->released) &&
                   !userfaultfd_signal_pending(vmf->flags))) {
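The '+' comment above names the lifetime rule at work: pin any object that will still be touched after the lock is dropped. Condensed to its essentials, the sequence this function follows is (real names, simplified flow):

	userfaultfd_ctx_get(ctx);	/* pin ctx while mmap_lock is still held */
	/* ... queue this task on ctx->fault_pending_wqh ... */
	mmap_read_unlock(mm);		/* from here, userfaultfd_release() can race */
	/* ... sleep until the fault is resolved, the ctx is released, or a signal ... */
	userfaultfd_ctx_put(ctx);	/* drop the pin; this may free ctx */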
@@ -637,7 +637,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
                struct mm_struct *mm = release_new_ctx->mm;
 
                /* the various vma->vm_userfaultfd_ctx still points to it */
-               down_write(&mm->mmap_sem);
+               mmap_write_lock(mm);
                /* no task can run (and in turn coredump) yet */
                VM_WARN_ON(!mmget_still_valid(mm));
                for (vma = mm->mmap; vma; vma = vma->vm_next)
@@ -645,7 +645,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
                                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
                                vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
                        }
-               up_write(&mm->mmap_sem);
+               mmap_write_unlock(mm);
 
                userfaultfd_ctx_put(release_new_ctx);
        }
@@ -799,7 +799,7 @@ bool userfaultfd_remove(struct vm_area_struct *vma,
 
        userfaultfd_ctx_get(ctx);
        WRITE_ONCE(ctx->mmap_changing, true);
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        msg_init(&ewq.msg);
 
@@ -890,11 +890,11 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
         * Flush page faults out of all CPUs. NOTE: all page faults
         * must be retried without returning VM_FAULT_SIGBUS if
         * userfaultfd_ctx_get() succeeds but vma->vma_userfault_ctx
-        * changes while handle_userfault released the mmap_sem. So
+        * changes while handle_userfault released the mmap_lock. So
         * it's critical that released is set to true (above), before
-        * taking the mmap_sem for writing.
+        * taking the mmap_lock for writing.
         */
-       down_write(&mm->mmap_sem);
+       mmap_write_lock(mm);
        still_valid = mmget_still_valid(mm);
        prev = NULL;
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
@@ -920,7 +920,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
                vma->vm_flags = new_flags;
                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
        }
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        mmput(mm);
 wakeup:
        /*
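Reduced to essentials, the handshake described above pairs this function with handle_userfault() roughly as follows (simplified sketch, not verbatim code):

	/* release side (this function): */
	WRITE_ONCE(ctx->released, true);	/* 1: publish the flag first ... */
	mmap_write_lock(mm);			/* 2: ... then wait out faulting readers */
	/* strip the VM_UFFD_* flags and ctx pointers from every vma */
	mmap_write_unlock(mm);

	/* fault side (handle_userfault, entered with mmap_lock held for read): */
	if (unlikely(READ_ONCE(ctx->released)))
		goto out;			/* bail out rather than raise SIGBUS */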
@@ -1248,7 +1248,7 @@ static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
        /*
         * To be sure waitqueue_active() is not reordered by the CPU
         * before the pagetable update, use an explicit SMP memory
-        * barrier here. PT lock release or up_read(mmap_sem) still
+        * barrier here. PT lock release or mmap_read_unlock(mm) still
         * have release semantics that can allow the
         * waitqueue_active() to be reordered before the pte update.
         */
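In generic form, the pattern the comment describes looks like the sketch below (resolve_fault() is an invented placeholder; the real wake path goes through the ctx waitqueues under their spinlocks):

	resolve_fault(mm, address);	/* placeholder: the pte is now updated */
	smp_mb();			/* full barrier: update visible before the check */
	if (waitqueue_active(&wqh))	/* lockless peek; unsafe without smp_mb() */
		wake_up_poll(&wqh, EPOLLIN);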
@@ -1345,7 +1345,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
        if (!mmget_not_zero(mm))
                goto out;
 
-       down_write(&mm->mmap_sem);
+       mmap_write_lock(mm);
        if (!mmget_still_valid(mm))
                goto out_unlock;
        vma = find_vma_prev(mm, start, &prev);
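The register and unregister paths share the same pinning pattern around their vma walks; condensed from the surrounding hunks (error handling elided):

	if (!mmget_not_zero(mm))	/* mm_users may already be zero (exiting task) */
		goto out;
	mmap_write_lock(mm);
	if (!mmget_still_valid(mm))	/* core dump in progress? don't touch vmas */
		goto out_unlock;
	/* ... walk and modify the vma list ... */
out_unlock:
	mmap_write_unlock(mm);
	mmput(mm);			/* balances mmget_not_zero() */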
@@ -1492,7 +1492,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
                vma = vma->vm_next;
        } while (vma && vma->vm_start < end);
 out_unlock:
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        mmput(mm);
        if (!ret) {
                __u64 ioctls_out;
@@ -1547,7 +1547,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
        if (!mmget_not_zero(mm))
                goto out;
 
-       down_write(&mm->mmap_sem);
+       mmap_write_lock(mm);
        if (!mmget_still_valid(mm))
                goto out_unlock;
        vma = find_vma_prev(mm, start, &prev);
@@ -1664,7 +1664,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
                vma = vma->vm_next;
        } while (vma && vma->vm_start < end);
 out_unlock:
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        mmput(mm);
 out:
        return ret;