diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
index a8ad205..da2ed57 100644
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
 /* path manager command handlers */
 
 int mptcp_pm_announce_addr(struct mptcp_sock *msk,
-                          const struct mptcp_addr_info *addr)
+                          const struct mptcp_addr_info *addr,
+                          bool echo, bool port)
 {
+       u8 add_addr = READ_ONCE(msk->pm.addr_signal);
+
        pr_debug("msk=%p, local_id=%d", msk, addr->id);
 
+       if (add_addr) {
+               pr_warn("addr_signal error, add_addr=%d", add_addr);
+               return -EINVAL;
+       }
+
        msk->pm.local = *addr;
-       WRITE_ONCE(msk->pm.addr_signal, true);
+       add_addr |= BIT(MPTCP_ADD_ADDR_SIGNAL);
+       if (echo)
+               add_addr |= BIT(MPTCP_ADD_ADDR_ECHO);
+       if (addr->family == AF_INET6)
+               add_addr |= BIT(MPTCP_ADD_ADDR_IPV6);
+       if (port)
+               add_addr |= BIT(MPTCP_ADD_ADDR_PORT);
+       WRITE_ONCE(msk->pm.addr_signal, add_addr);
        return 0;
 }
 
 int mptcp_pm_remove_addr(struct mptcp_sock *msk, u8 local_id)
 {
-       return -ENOTSUPP;
+       u8 rm_addr = READ_ONCE(msk->pm.addr_signal);
+
+       pr_debug("msk=%p, local_id=%d", msk, local_id);
+
+       if (rm_addr) {
+               pr_warn("addr_signal error, rm_addr=%d", rm_addr);
+               return -EINVAL;
+       }
+
+       msk->pm.rm_id = local_id;
+       rm_addr |= BIT(MPTCP_RM_ADDR_SIGNAL);
+       WRITE_ONCE(msk->pm.addr_signal, rm_addr);
+       return 0;
 }
 
-int mptcp_pm_remove_subflow(struct mptcp_sock *msk, u8 remote_id)
+int mptcp_pm_remove_subflow(struct mptcp_sock *msk, u8 local_id)
 {
-       return -ENOTSUPP;
+       pr_debug("msk=%p, local_id=%d", msk, local_id);
+
+       spin_lock_bh(&msk->pm.lock);
+       mptcp_pm_nl_rm_subflow_received(msk, local_id);
+       spin_unlock_bh(&msk->pm.lock);
+       return 0;
 }
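
With this patch, pm.addr_signal becomes a small bitmask rather than a plain
boolean, so a single byte carries the pending ADD_ADDR/RM_ADDR signal together
with the echo, IPv6 and port flags. As a rough sketch (illustrative only, not
quoted from the patch; the real helpers live in protocol.h), the
mptcp_pm_should_*() predicates used later in this file reduce to simple bit
tests:

	static inline bool mptcp_pm_should_add_signal(struct mptcp_sock *msk)
	{
		/* an ADD_ADDR (or its echo) is pending for transmission */
		return READ_ONCE(msk->pm.addr_signal) & BIT(MPTCP_ADD_ADDR_SIGNAL);
	}

	static inline bool mptcp_pm_should_rm_signal(struct mptcp_sock *msk)
	{
		/* an RM_ADDR is pending for transmission */
		return READ_ONCE(msk->pm.addr_signal) & BIT(MPTCP_RM_ADDR_SIGNAL);
	}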
 
 /* path manager event handlers */
@@ -46,7 +78,7 @@ void mptcp_pm_new_connection(struct mptcp_sock *msk, int server_side)
 bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
 {
        struct mptcp_pm_data *pm = &msk->pm;
-       int ret;
+       int ret = 0;
 
        pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
                 pm->subflows_max, READ_ONCE(pm->accept_subflow));
@@ -56,9 +88,11 @@ bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
                return false;
 
        spin_lock_bh(&pm->lock);
-       ret = pm->subflows < pm->subflows_max;
-       if (ret && ++pm->subflows == pm->subflows_max)
-               WRITE_ONCE(pm->accept_subflow, false);
+       if (READ_ONCE(pm->accept_subflow)) {
+               ret = pm->subflows < pm->subflows_max;
+               if (ret && ++pm->subflows == pm->subflows_max)
+                       WRITE_ONCE(pm->accept_subflow, false);
+       }
        spin_unlock_bh(&pm->lock);
 
        return ret;
@@ -76,8 +110,7 @@ static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
                return false;
 
        msk->pm.status |= BIT(new_status);
-       if (schedule_work(&msk->work))
-               sock_hold((struct sock *)msk);
+       mptcp_schedule_work((struct sock *)msk);
        return true;
 }
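
The open-coded schedule_work()/sock_hold() pair is replaced by the
mptcp_schedule_work() helper. A minimal sketch of the pattern being
centralized (illustrative name and body, not the helper's actual
implementation in protocol.c):

	static void msk_schedule_work(struct sock *sk)	/* hypothetical name */
	{
		/* hold a reference on the msk for as long as the work item is
		 * queued; the worker releases it once it has run
		 */
		if (schedule_work(&mptcp_sk(sk)->work))
			sock_hold(sk);
	}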
 
@@ -93,8 +126,14 @@ void mptcp_pm_fully_established(struct mptcp_sock *msk)
 
        spin_lock_bh(&pm->lock);
 
-       if (READ_ONCE(pm->work_pending))
+       /* mptcp_pm_fully_established() can be invoked by multiple
+        * racing paths - accept() and check_fully_established() -
+        * so be sure to serve this event only once.
+        */
+       if (READ_ONCE(pm->work_pending) &&
+           !(msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)))
                mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);
+       msk->pm.status |= BIT(MPTCP_PM_ALREADY_ESTABLISHED);
 
        spin_unlock_bh(&pm->lock);
 }
@@ -135,38 +174,83 @@ void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
        pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
                 READ_ONCE(pm->accept_addr));
 
-       /* avoid acquiring the lock if there is no room for fouther addresses */
-       if (!READ_ONCE(pm->accept_addr))
-               return;
-
        spin_lock_bh(&pm->lock);
 
-       /* be sure there is something to signal re-checking under PM lock */
-       if (READ_ONCE(pm->accept_addr) &&
-           mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED))
+       if (!READ_ONCE(pm->accept_addr)) {
+               mptcp_pm_announce_addr(msk, addr, true, addr->port);
+               mptcp_pm_add_addr_send_ack(msk);
+       } else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
                pm->remote = *addr;
+       }
+
+       spin_unlock_bh(&pm->lock);
+}
+
+void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk)
+{
+       if (!mptcp_pm_should_add_signal_ipv6(msk) &&
+           !mptcp_pm_should_add_signal_port(msk))
+               return;
+
+       mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_SEND_ACK);
+}
+
+void mptcp_pm_rm_addr_received(struct mptcp_sock *msk, u8 rm_id)
+{
+       struct mptcp_pm_data *pm = &msk->pm;
+
+       pr_debug("msk=%p remote_id=%d", msk, rm_id);
 
+       spin_lock_bh(&pm->lock);
+       mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED);
+       pm->rm_id = rm_id;
        spin_unlock_bh(&pm->lock);
 }
 
 /* path manager helpers */
 
-bool mptcp_pm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
-                         struct mptcp_addr_info *saddr)
+bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
+                             struct mptcp_addr_info *saddr, bool *echo, bool *port)
 {
        int ret = false;
 
        spin_lock_bh(&msk->pm.lock);
 
        /* double check after the lock is acquired */
-       if (!mptcp_pm_should_signal(msk))
+       if (!mptcp_pm_should_add_signal(msk))
                goto out_unlock;
 
-       if (remaining < mptcp_add_addr_len(msk->pm.local.family))
+       *echo = mptcp_pm_should_add_signal_echo(msk);
+       *port = mptcp_pm_should_add_signal_port(msk);
+
+       if (remaining < mptcp_add_addr_len(msk->pm.local.family, *echo, *port))
                goto out_unlock;
 
        *saddr = msk->pm.local;
-       WRITE_ONCE(msk->pm.addr_signal, false);
+       WRITE_ONCE(msk->pm.addr_signal, 0);
+       ret = true;
+
+out_unlock:
+       spin_unlock_bh(&msk->pm.lock);
+       return ret;
+}
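
mptcp_pm_add_addr_signal() is intended to be called from the TCP option
writing path, which passes in the option space still available in the header.
A hypothetical caller sketch (the local variable names are illustrative):

	struct mptcp_addr_info saddr;
	bool echo, port;

	/* emit the pending ADD_ADDR (or its echo) only if it fits in the
	 * remaining TCP option space; the PM clears addr_signal on success
	 */
	if (mptcp_pm_add_addr_signal(msk, remaining, &saddr, &echo, &port)) {
		/* build the ADD_ADDR option from saddr, marking it as an echo
		 * and/or carrying the port when requested
		 */
	}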
+
+bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
+                            u8 *rm_id)
+{
+       int ret = false;
+
+       spin_lock_bh(&msk->pm.lock);
+
+       /* double check after the lock is acquired */
+       if (!mptcp_pm_should_rm_signal(msk))
+               goto out_unlock;
+
+       if (remaining < TCPOLEN_MPTCP_RM_ADDR_BASE)
+               goto out_unlock;
+
+       *rm_id = msk->pm.rm_id;
+       WRITE_ONCE(msk->pm.addr_signal, 0);
        ret = true;
 
 out_unlock:
@@ -185,13 +269,15 @@ void mptcp_pm_data_init(struct mptcp_sock *msk)
        msk->pm.add_addr_accepted = 0;
        msk->pm.local_addr_used = 0;
        msk->pm.subflows = 0;
+       msk->pm.rm_id = 0;
        WRITE_ONCE(msk->pm.work_pending, false);
-       WRITE_ONCE(msk->pm.addr_signal, false);
+       WRITE_ONCE(msk->pm.addr_signal, 0);
        WRITE_ONCE(msk->pm.accept_addr, false);
        WRITE_ONCE(msk->pm.accept_subflow, false);
        msk->pm.status = 0;
 
        spin_lock_init(&msk->pm.lock);
+       INIT_LIST_HEAD(&msk->pm.anno_list);
 
        mptcp_pm_nl_data_init(msk);
 }
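
mptcp_pm_data_init() now also initializes pm.anno_list, the list the netlink
path manager uses to track addresses it has announced (for example to drive
ADD_ADDR retransmission). A sketch of the kind of entry it is expected to
hold (the struct and field names below are hypothetical; the real definition
lives in pm_netlink.c):

	struct pm_anno_entry {				/* hypothetical name */
		struct list_head	list;		/* linked on msk->pm.anno_list */
		struct mptcp_addr_info	addr;		/* the announced address */
		struct timer_list	add_timer;	/* ADD_ADDR retransmit timer */
	};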