mptcp: use mptcp release_cb for delayed tasks
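
The hunks below replace worker scheduling with inline or deferred
processing: on a window update, ack_update_msk() now calls the new
__mptcp_wnd_updated() helper instead of conditionally scheduling the
MPTCP worker, so (per the subject) the delayed push can run from the
mptcp release_cb; the subflow socket argument loses its const qualifier
so the helper can transmit on it. On the fallback path,
mptcp_incoming_options() now takes the msk data lock and spools the
pending queue / cleans up acked data inline, as the added comment
explains.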
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 3986454..6b7b4b6 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -830,7 +830,7 @@ static u64 expand_ack(u64 old_ack, u64 cur_ack, bool use_64bit)
 }
 
 static void ack_update_msk(struct mptcp_sock *msk,
-                          const struct sock *ssk,
+                          struct sock *ssk,
                           struct mptcp_options_received *mp_opt)
 {
        u64 new_wnd_end, new_snd_una, snd_nxt = READ_ONCE(msk->snd_nxt);
@@ -854,8 +854,7 @@ static void ack_update_msk(struct mptcp_sock *msk,
 
        if (after64(new_wnd_end, msk->wnd_end)) {
                msk->wnd_end = new_wnd_end;
-               if (mptcp_send_head(sk))
-                       mptcp_schedule_work(sk);
+               __mptcp_wnd_updated(sk, ssk);
        }
 
        if (after64(new_snd_una, old_snd_una)) {
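
The release_cb pattern behind __mptcp_wnd_updated() (the helper itself
lives outside this file) boils down to: if the msk socket lock is
currently owned by user context, record the pending work as a flag and
let the lock owner run it on release; otherwise do the work right away.
Below is a minimal, self-contained userspace model of that deferral,
assuming a PUSH_PENDING-style flag; every name in it is illustrative,
not the kernel implementation.

/* Userspace model of the deferral: names mirror the kernel's naming,
 * the bodies are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

#define MODEL_PUSH_PENDING	(1UL << 0)

struct model_sock {
	bool owned_by_user;	/* models sock_owned_by_user() */
	unsigned long flags;	/* models the msk deferral flags */
};

/* stand-in for the actual pending-data push */
static void push_pending(void)
{
	printf("spooling pending data\n");
}

/* models __mptcp_wnd_updated(): push now if possible, else defer */
static void wnd_updated(struct model_sock *msk)
{
	if (msk->owned_by_user)
		msk->flags |= MODEL_PUSH_PENDING;	/* release_cb will act */
	else
		push_pending();
}

/* models the release_cb: run whatever was deferred while locked */
static void release_cb(struct model_sock *msk)
{
	if (msk->flags & MODEL_PUSH_PENDING) {
		msk->flags &= ~MODEL_PUSH_PENDING;
		push_pending();
	}
}

int main(void)
{
	struct model_sock msk = { .owned_by_user = true };

	wnd_updated(&msk);	/* lock owned: work is deferred */
	msk.owned_by_user = false;
	release_cb(&msk);	/* deferred push runs at release time */
	wnd_updated(&msk);	/* lock free: push runs immediately */
	return 0;
}

In the kernel the flag updates have to be atomic bitops, since softirq
and process context race on them; the single-threaded model skips that.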
@@ -915,8 +914,19 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
        struct mptcp_options_received mp_opt;
        struct mptcp_ext *mpext;
 
-       if (__mptcp_check_fallback(msk))
+       if (__mptcp_check_fallback(msk)) {
+               /* Keep it simple and unconditionally trigger send data cleanup and
+                * pending queue spooling. We will need to acquire the data lock
+                * for more accurate checks, and once the lock is acquired, such
+                * helpers are cheap.
+                */
+               mptcp_data_lock(subflow->conn);
+               if (mptcp_send_head(subflow->conn))
+                       __mptcp_wnd_updated(subflow->conn, sk);
+               __mptcp_data_acked(subflow->conn);
+               mptcp_data_unlock(subflow->conn);
                return;
+       }
 
        mptcp_get_options(skb, &mp_opt);
        if (!check_fully_established(msk, sk, subflow, skb, &mp_opt))
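
The fallback hunk above applies the same idea at the msk data-lock
level: rather than adding lockless pre-checks in the receive path, take
the lock once and call the helpers unconditionally, trusting them to be
cheap no-ops when there is nothing to do, exactly as the added comment
argues. A small pthread-based model of that shape, with all names made
up for illustration:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* stands in for the msk data lock (a BH-safe spinlock in the kernel) */
static pthread_mutex_t data_lock = PTHREAD_MUTEX_INITIALIZER;

static bool have_send_head = true;	/* models mptcp_send_head() != NULL */

/* stand-ins for __mptcp_wnd_updated()/__mptcp_data_acked(): assumed
 * cheap under the lock when idle
 */
static void model_wnd_updated(void) { printf("spool pending queue\n"); }
static void model_data_acked(void) { printf("clean acked data\n"); }

static void model_fallback_rx(void)
{
	/* no lockless pre-checks: lock once, let the helpers decide */
	pthread_mutex_lock(&data_lock);
	if (have_send_head)
		model_wnd_updated();
	model_data_acked();
	pthread_mutex_unlock(&data_lock);
}

int main(void)
{
	model_fallback_rx();
	return 0;
}

The trade-off is one lock round-trip per fallback packet in exchange
for not duplicating state checks outside the lock.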