Merge tag 'locking-urgent-2020-08-10' of git://git.kernel.org/pub/scm/linux/kernel...
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 52de290..0e4a383 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -61,7 +61,7 @@ struct userfaultfd_ctx {
        /* waitqueue head for events */
        wait_queue_head_t event_wqh;
        /* a refile sequence protected by fault_pending_wqh lock */
-       struct seqcount refile_seq;
+       seqcount_spinlock_t refile_seq;
        /* pseudo fd refcounting */
        refcount_t refcount;
        /* userfaultfd syscall flags */
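
For context: seqcount_spinlock_t is one of the seqcount_LOCKNAME_t variants that ties a sequence counter to the lock serializing its writers, here fault_pending_wqh.lock. A minimal sketch of the resulting read/write pattern, using a reduced stand-in struct rather than this file's real code:

#include <linux/seqlock.h>
#include <linux/wait.h>

struct demo_ctx {
	wait_queue_head_t fault_pending_wqh;
	/* associated with fault_pending_wqh.lock at init time */
	seqcount_spinlock_t refile_seq;
};

/* Writer: must hold the associated spinlock around the write section. */
static void demo_writer(struct demo_ctx *ctx)
{
	spin_lock_irq(&ctx->fault_pending_wqh.lock);
	write_seqcount_begin(&ctx->refile_seq);
	/* ... refile a waitqueue entry ... */
	write_seqcount_end(&ctx->refile_seq);
	spin_unlock_irq(&ctx->fault_pending_wqh.lock);
}

/* Lockless reader: retries if a refile raced with the read. */
static bool demo_reader(struct demo_ctx *ctx)
{
	unsigned int seq;
	bool pending;

	do {
		seq = read_seqcount_begin(&ctx->refile_seq);
		pending = waitqueue_active(&ctx->fault_pending_wqh);
	} while (read_seqcount_retry(&ctx->refile_seq, seq));

	return pending;
}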
@@ -339,7 +339,6 @@ out:
        return ret;
 }
 
-/* Should pair with userfaultfd_signal_pending() */
 static inline long userfaultfd_get_blocking_state(unsigned int flags)
 {
        if (flags & FAULT_FLAG_INTERRUPTIBLE)
@@ -351,18 +350,6 @@ static inline long userfaultfd_get_blocking_state(unsigned int flags)
        return TASK_UNINTERRUPTIBLE;
 }
 
-/* Should pair with userfaultfd_get_blocking_state() */
-static inline bool userfaultfd_signal_pending(unsigned int flags)
-{
-       if (flags & FAULT_FLAG_INTERRUPTIBLE)
-               return signal_pending(current);
-
-       if (flags & FAULT_FLAG_KILLABLE)
-               return fatal_signal_pending(current);
-
-       return false;
-}
-
 /*
  * The locking rules involved in returning VM_FAULT_RETRY depending on
  * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and
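
The helper removed above duplicated behavior the scheduler already provides: a task sleeping in TASK_INTERRUPTIBLE is woken by any signal, and one in TASK_KILLABLE by fatal signals, so an explicit signal_pending() test only matters while the task is still running. A hedged sketch of the idiom that makes the helper redundant (wait_for_event and its parameters are illustrative, not from this file):

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>

static void wait_for_event(bool *event, unsigned int fault_flags)
{
	long state = TASK_UNINTERRUPTIBLE;

	if (fault_flags & FAULT_FLAG_INTERRUPTIBLE)
		state = TASK_INTERRUPTIBLE;
	else if (fault_flags & FAULT_FLAG_KILLABLE)
		state = TASK_KILLABLE;

	set_current_state(state);	/* must precede the condition check */
	if (!READ_ONCE(*event))
		schedule();		/* signals end the sleep as per 'state' */
	__set_current_state(TASK_RUNNING);
}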
@@ -516,33 +503,9 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
                                                       vmf->flags, reason);
        mmap_read_unlock(mm);
 
-       if (likely(must_wait && !READ_ONCE(ctx->released) &&
-                  !userfaultfd_signal_pending(vmf->flags))) {
+       if (likely(must_wait && !READ_ONCE(ctx->released))) {
                wake_up_poll(&ctx->fd_wqh, EPOLLIN);
                schedule();
-               ret |= VM_FAULT_MAJOR;
-
-               /*
-                * False wakeups can orginate even from rwsem before
-                * up_read() however userfaults will wait either for a
-                * targeted wakeup on the specific uwq waitqueue from
-                * wake_userfault() or for signals or for uffd
-                * release.
-                */
-               while (!READ_ONCE(uwq.waken)) {
-                       /*
-                        * This needs the full smp_store_mb()
-                        * guarantee as the state write must be
-                        * visible to other CPUs before reading
-                        * uwq.waken from other CPUs.
-                        */
-                       set_current_state(blocking_state);
-                       if (READ_ONCE(uwq.waken) ||
-                           READ_ONCE(ctx->released) ||
-                           userfaultfd_signal_pending(vmf->flags))
-                               break;
-                       schedule();
-               }
        }
 
        __set_current_state(TASK_RUNNING);
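
The deleted loop existed to filter spurious wakeups (e.g. from the mmap_lock rwsem) after the task had already dropped the mmap lock. It becomes unnecessary once the task state is published while fault_pending_wqh.lock is still held, which handle_userfault() now does before reaching this point: the waker takes the same waitqueue lock, so the single schedule() above cannot miss a wakeup, and a spurious wakeup merely retries the fault. A sketch of that sleeper/waker pairing, with illustrative names:

#include <linux/sched.h>
#include <linux/wait.h>

/* Sleeper: queue first, publish the task state under the waitqueue
 * lock, then a single schedule(). No rechecking loop is needed.
 */
static void demo_sleeper(wait_queue_head_t *wqh, long state)
{
	DEFINE_WAIT(wait);		/* autoremove_wake_function */

	spin_lock_irq(&wqh->lock);
	__add_wait_queue(wqh, &wait);
	set_current_state(state);	/* visible before the lock drops */
	spin_unlock_irq(&wqh->lock);

	schedule();			/* one sleep; the waker can't miss us */
	__set_current_state(TASK_RUNNING);

	spin_lock_irq(&wqh->lock);
	if (!list_empty(&wait.entry))	/* not auto-removed by a wakeup */
		__remove_wait_queue(wqh, &wait);
	spin_unlock_irq(&wqh->lock);
}

/* Waker: the shared lock orders this against the sleeper's
 * set_current_state(), so the wakeup is never lost.
 */
static void demo_waker(wait_queue_head_t *wqh)
{
	spin_lock_irq(&wqh->lock);
	__wake_up_locked(wqh, TASK_NORMAL, 1);
	spin_unlock_irq(&wqh->lock);
}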
@@ -1998,7 +1961,7 @@ static void init_once_userfaultfd_ctx(void *mem)
        init_waitqueue_head(&ctx->fault_wqh);
        init_waitqueue_head(&ctx->event_wqh);
        init_waitqueue_head(&ctx->fd_wqh);
-       seqcount_init(&ctx->refile_seq);
+       seqcount_spinlock_init(&ctx->refile_seq, &ctx->fault_pending_wqh.lock);
 }
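
Beyond swapping the init call, seqcount_spinlock_init() records the lock association for the lifetime of the counter: with lockdep enabled, entering the write side without holding fault_pending_wqh.lock is flagged, and on PREEMPT_RT the read side may take the associated lock to let a preempted writer finish. A hypothetical sketch of the init step (demo_* names are not from this file):

#include <linux/seqlock.h>
#include <linux/wait.h>

struct demo_ctx {
	wait_queue_head_t fault_pending_wqh;
	seqcount_spinlock_t refile_seq;
};

static void demo_init_once(struct demo_ctx *ctx)
{
	init_waitqueue_head(&ctx->fault_pending_wqh);
	/* Ties the counter to the waitqueue spinlock so lockdep can
	 * assert the lock is held in write_seqcount_begin(). */
	seqcount_spinlock_init(&ctx->refile_seq,
			       &ctx->fault_pending_wqh.lock);
}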
 
 SYSCALL_DEFINE1(userfaultfd, int, flags)