ep_insert(): don't open-code ep_remove() on failure exits
author Al Viro <viro@zeniv.linux.org.uk>
Sat, 26 Sep 2020 22:09:29 +0000 (18:09 -0400)
committer Al Viro <viro@zeniv.linux.org.uk>
Mon, 26 Oct 2020 00:02:01 +0000 (20:02 -0400)
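
The later failure exits in ep_insert() open-coded what ep_remove() already does: unhook the epitem from the target file's f_ep_links, erase it from the rbtree, unlink it from the ready list and unregister the wakeup source. Once the epitem has been linked into the file and the rbtree, just call ep_remove() on failure and kill the error_unregister / error_remove_epi / error_create_wakeup_source labels. The only exit still freeing directly is the ep_create_wakeup_source() failure, where nothing has been linked yet.
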
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
fs/eventpoll.c

index f9c567a..c987b61 100644
@@ -1384,12 +1384,16 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
        epi->next = EP_UNACTIVE_PTR;
        if (epi->event.events & EPOLLWAKEUP) {
                error = ep_create_wakeup_source(epi);
-               if (error)
-                       goto error_create_wakeup_source;
+               if (error) {
+                       kmem_cache_free(epi_cache, epi);
+                       return error;
+               }
        } else {
                RCU_INIT_POINTER(epi->ws, NULL);
        }
 
+       atomic_long_inc(&ep->user->epoll_watches);
+
        /* Add the current item to the list of active epoll hook for this file */
        spin_lock(&tfile->f_lock);
        list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
@@ -1402,9 +1406,10 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
        ep_rbtree_insert(ep, epi);
 
        /* now check if we've created too many backpaths */
-       error = -EINVAL;
-       if (full_check && reverse_path_check())
-               goto error_remove_epi;
+       if (unlikely(full_check && reverse_path_check())) {
+               ep_remove(ep, epi);
+               return -EINVAL;
+       }
 
        /* Initialize the poll table using the queue callback */
        epq.epi = epi;
@@ -1424,9 +1429,10 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
         * install process. Namely an allocation for a wait queue failed due
         * high memory pressure.
         */
-       error = -ENOMEM;
-       if (!epq.epi)
-               goto error_unregister;
+       if (unlikely(!epq.epi)) {
+               ep_remove(ep, epi);
+               return -ENOMEM;
+       }
 
        /* We have to drop the new item inside our item list to keep track of it */
        write_lock_irq(&ep->lock);
@@ -1448,40 +1454,11 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
 
        write_unlock_irq(&ep->lock);
 
-       atomic_long_inc(&ep->user->epoll_watches);
-
        /* We have to call this outside the lock */
        if (pwake)
                ep_poll_safewake(ep, NULL);
 
        return 0;
-
-error_unregister:
-       ep_unregister_pollwait(ep, epi);
-error_remove_epi:
-       spin_lock(&tfile->f_lock);
-       list_del_rcu(&epi->fllink);
-       spin_unlock(&tfile->f_lock);
-
-       rb_erase_cached(&epi->rbn, &ep->rbr);
-
-       /*
-        * We need to do this because an event could have been arrived on some
-        * allocated wait queue. Note that we don't care about the ep->ovflist
-        * list, since that is used/cleaned only inside a section bound by "mtx".
-        * And ep_insert() is called with "mtx" held.
-        */
-       write_lock_irq(&ep->lock);
-       if (ep_is_linked(epi))
-               list_del_init(&epi->rdllink);
-       write_unlock_irq(&ep->lock);
-
-       wakeup_source_unregister(ep_wakeup_source(epi));
-
-error_create_wakeup_source:
-       kmem_cache_free(epi_cache, epi);
-
-       return error;
 }
 
 /*
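
Two smaller points in the diff: the failure checks gain unlikely() hints, and the atomic_long_inc() of ep->user->epoll_watches moves from the success path to before the insertion. ep_remove() decrements that counter on its way out, so the count has to be taken before the first exit that can reach ep_remove(); an aborted insert then gives back a reference it actually held.

For readers outside eventpoll, here is a minimal user-space sketch of the cleanup pattern the patch adopts; the names (struct watch, watch_create(), watch_destroy()) are made up for illustration, and this is not the kernel code. Early failures, before anything is linked into the object, free directly; later failures reuse the one teardown helper instead of open-coding its body behind goto labels:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct watch {
		char *name;	/* first resource */
		int *events;	/* second resource */
	};

	/* The single teardown helper: undoes whatever watch_create() set up. */
	static void watch_destroy(struct watch *w)
	{
		free(w->events);
		free(w->name);
		free(w);
	}

	static struct watch *watch_create(const char *name, size_t nevents)
	{
		struct watch *w = calloc(1, sizeof(*w));

		if (!w)
			return NULL;

		w->name = strdup(name);
		if (!w->name) {
			/* nothing linked yet: freeing directly is simpler */
			free(w);
			return NULL;
		}

		w->events = calloc(nevents, sizeof(*w->events));
		if (!w->events) {
			/* enough state exists for the helper: reuse it */
			watch_destroy(w);
			return NULL;
		}
		return w;
	}

	int main(void)
	{
		struct watch *w = watch_create("demo", 8);

		if (!w)
			return 1;
		puts(w->name);
		watch_destroy(w);
		return 0;
	}

Keeping one teardown helper means a future change to the unwind order is made in one place, and the failure paths cannot drift out of sync with ep_remove() the way duplicated label blocks can.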