// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2019, Intel Corporation.
 */
#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include "protocol.h"

#include "mib.h"
/* path manager command handlers */
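
/* Record a local address (or the echo of a peer address) for ADD_ADDR
 * transmission. Called with the PM lock held; the pending address and
 * signal bit are consumed later by mptcp_pm_add_addr_signal() when the
 * TCP options are built.
 */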
int mptcp_pm_announce_addr(struct mptcp_sock *msk,
			   const struct mptcp_addr_info *addr,
			   bool echo)
{
	u8 add_addr = READ_ONCE(msk->pm.addr_signal);

	pr_debug("msk=%p, local_id=%d, echo=%d", msk, addr->id, echo);

	lockdep_assert_held(&msk->pm.lock);

	if (add_addr &
	    (echo ? BIT(MPTCP_ADD_ADDR_ECHO) : BIT(MPTCP_ADD_ADDR_SIGNAL))) {
		pr_warn("addr_signal error, add_addr=%d, echo=%d", add_addr, echo);
		return -EINVAL;
	}

	if (echo) {
		msk->pm.remote = *addr;
		add_addr |= BIT(MPTCP_ADD_ADDR_ECHO);
	} else {
		msk->pm.local = *addr;
		add_addr |= BIT(MPTCP_ADD_ADDR_SIGNAL);
	}
	WRITE_ONCE(msk->pm.addr_signal, add_addr);
	return 0;
}
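
/* Queue @rm_list for RM_ADDR transmission and ack the peer, so that the
 * pending option is sent out without waiting for new data.
 */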
int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list)
{
	u8 rm_addr = READ_ONCE(msk->pm.addr_signal);

	pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);

	if (rm_addr) {
		pr_warn("addr_signal error, rm_addr=%d", rm_addr);
		return -EINVAL;
	}

	msk->pm.rm_list_tx = *rm_list;
	rm_addr |= BIT(MPTCP_RM_ADDR_SIGNAL);
	WRITE_ONCE(msk->pm.addr_signal, rm_addr);
	mptcp_pm_nl_addr_send_ack(msk);
	return 0;
}
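
/* Tear down the local subflows matching @rm_list, under the PM lock. */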
int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list)
{
	pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);

	spin_lock_bh(&msk->pm.lock);
	mptcp_pm_nl_rm_subflow_received(msk, rm_list);
	spin_unlock_bh(&msk->pm.lock);
	return 0;
}
/* path manager event handlers */
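
/* A new MPTCP connection has been created: record which side we are on
 * and report the event to userspace.
 */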
void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);

	WRITE_ONCE(pm->server_side, server_side);
	mptcp_event(MPTCP_EVENT_CREATED, msk, ssk, GFP_ATOMIC);
}
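
/* Called for incoming MP_JOIN requests: accept the new subflow only if
 * the per-connection limit has not been reached yet, flipping
 * accept_subflow off once the limit is hit.
 */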
bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;
	unsigned int subflows_max;
	int ret = 0;

	subflows_max = mptcp_pm_get_subflows_max(msk);

	pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
		 subflows_max, READ_ONCE(pm->accept_subflow));

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->accept_subflow))
		return false;

	spin_lock_bh(&pm->lock);
	if (READ_ONCE(pm->accept_subflow)) {
		ret = pm->subflows < subflows_max;
		if (ret && ++pm->subflows == subflows_max)
			WRITE_ONCE(pm->accept_subflow, false);
	}
	spin_unlock_bh(&pm->lock);

	return ret;
}
/* return true if the new status bit is currently cleared, that is, this event
 * can be served, possibly by an already scheduled work item
 */
static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
				   enum mptcp_pm_status new_status)
{
	pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
		 BIT(new_status));
	if (msk->pm.status & BIT(new_status))
		return false;

	msk->pm.status |= BIT(new_status);
	mptcp_schedule_work((struct sock *)msk);
	return true;
}
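
/* The MPTCP connection is fully established: schedule PM work if needed
 * and emit the ESTABLISHED event towards userspace exactly once.
 */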
void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk, gfp_t gfp)
{
	struct mptcp_pm_data *pm = &msk->pm;
	bool announce = false;

	pr_debug("msk=%p", msk);

	spin_lock_bh(&pm->lock);

	/* mptcp_pm_fully_established() can be invoked by multiple
	 * racing paths - accept() and check_fully_established() -
	 * be sure to serve this event only once.
	 */
	if (READ_ONCE(pm->work_pending) &&
	    !(msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)))
		mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);

	if ((msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)) == 0)
		announce = true;

	msk->pm.status |= BIT(MPTCP_PM_ALREADY_ESTABLISHED);
	spin_unlock_bh(&pm->lock);

	if (announce)
		mptcp_event(MPTCP_EVENT_ESTABLISHED, msk, ssk, gfp);
}
void mptcp_pm_connection_closed(struct mptcp_sock *msk)
{
	pr_debug("msk=%p", msk);
}
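
/* A subflow reached the established state: give the worker a chance to
 * act on any still pending PM commands.
 */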
void mptcp_pm_subflow_established(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	if (READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}
void mptcp_pm_subflow_check_next(struct mptcp_sock *msk, const struct sock *ssk,
				 const struct mptcp_subflow_context *subflow)
{
	struct mptcp_pm_data *pm = &msk->pm;
	bool update_subflows;

	update_subflows = subflow->request_join || subflow->mp_join;
	if (!READ_ONCE(pm->work_pending) && !update_subflows)
		return;

	spin_lock_bh(&pm->lock);
	if (update_subflows)
		__mptcp_pm_close_subflow(msk);

	/* Even if this subflow is not really established, tell the PM to try
	 * to pick the next ones, if possible.
	 */
	if (mptcp_pm_nl_check_work_pending(msk))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}
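
/* The peer announced a new address: either bounce back an immediate echo
 * (when no more addresses can be accepted), defer it to the worker, or
 * account the drop.
 */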
void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
				const struct mptcp_addr_info *addr)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
		 READ_ONCE(pm->accept_addr));

	mptcp_event_addr_announced(msk, addr);

	spin_lock_bh(&pm->lock);

	if (!READ_ONCE(pm->accept_addr)) {
		mptcp_pm_announce_addr(msk, addr, true);
		mptcp_pm_add_addr_send_ack(msk);
	} else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
		pm->remote = *addr;
	} else {
		__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
	}

	spin_unlock_bh(&pm->lock);
}
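
/* The peer echoed one of our announcements: if it matches an entry still
 * in the announce list, let the worker try to establish more subflows.
 */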
void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk,
			      const struct mptcp_addr_info *addr)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	spin_lock_bh(&pm->lock);

	if (mptcp_lookup_anno_list_by_saddr(msk, addr) && READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}
void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk)
{
	if (!mptcp_pm_should_add_signal(msk))
		return;

	mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_SEND_ACK);
}
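
/* The peer removed some of its addresses: notify userspace about each
 * removed id and queue the list for the worker; account a drop if a
 * previous removal is still being processed.
 */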
void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
			       const struct mptcp_rm_list *rm_list)
{
	struct mptcp_pm_data *pm = &msk->pm;
	u8 i;

	pr_debug("msk=%p remote_ids_nr=%d", msk, rm_list->nr);

	for (i = 0; i < rm_list->nr; i++)
		mptcp_event_addr_removed(msk, rm_list->ids[i]);

	spin_lock_bh(&pm->lock);
	if (mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED))
		pm->rm_list_rx = *rm_list;
	else
		__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_RMADDRDROP);
	spin_unlock_bh(&pm->lock);
}
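
/* The peer changed the priority of a subflow: update the backup flag and
 * report the new priority to userspace.
 */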
void mptcp_pm_mp_prio_received(struct sock *sk, u8 bkup)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow->backup=%d, bkup=%d\n", subflow->backup, bkup);
	subflow->backup = bkup;

	mptcp_event(MPTCP_EVENT_SUB_PRIORITY, mptcp_sk(subflow->conn), sk, GFP_ATOMIC);
}
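
/* An MP_FAIL option has been received; no recovery is attempted here,
 * the event is only logged.
 */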
void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
{
	pr_debug("fail_seq=%llu", fail_seq);
}
/* path manager helpers */
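
/* Copy the pending ADD_ADDR (or its echo) into @addr if it fits into the
 * remaining TCP option space, clearing the corresponding signal bit on
 * success; returns true when the option should be emitted.
 */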
bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, const struct sk_buff *skb,
			      unsigned int opt_size, unsigned int remaining,
			      struct mptcp_addr_info *addr, bool *echo,
			      bool *drop_other_suboptions)
{
	int ret = false;
	u8 add_addr;
	u8 family;
	bool port;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_add_signal(msk))
		goto out_unlock;

	/* always drop every other option for a pure ack ADD_ADDR; this is a
	 * plain dup-ack from the TCP perspective. The other MPTCP-relevant info,
	 * if any, will be carried by the 'original' TCP ack
	 */
	if (skb && skb_is_tcp_pure_ack(skb)) {
		remaining += opt_size;
		*drop_other_suboptions = true;
	}

	*echo = mptcp_pm_should_add_signal_echo(msk);
	port = !!(*echo ? msk->pm.remote.port : msk->pm.local.port);

	family = *echo ? msk->pm.remote.family : msk->pm.local.family;
	if (remaining < mptcp_add_addr_len(family, *echo, port))
		goto out_unlock;

	if (*echo) {
		*addr = msk->pm.remote;
		add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_ECHO);
	} else {
		*addr = msk->pm.local;
		add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_SIGNAL);
	}
	WRITE_ONCE(msk->pm.addr_signal, add_addr);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}
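
/* Copy the pending RM_ADDR list into @rm_list if it fits into the
 * remaining TCP option space; returns true when the option should be
 * emitted.
 */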
bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
			     struct mptcp_rm_list *rm_list)
{
	int ret = false, len;
	u8 rm_addr;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_rm_signal(msk))
		goto out_unlock;

	rm_addr = msk->pm.addr_signal & ~BIT(MPTCP_RM_ADDR_SIGNAL);
	len = mptcp_rm_addr_len(&msk->pm.rm_list_tx);
	if (len < 0) {
		WRITE_ONCE(msk->pm.addr_signal, rm_addr);
		goto out_unlock;
	}
	if (remaining < len)
		goto out_unlock;

	*rm_list = msk->pm.rm_list_tx;
	WRITE_ONCE(msk->pm.addr_signal, rm_addr);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}
int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
{
	return mptcp_pm_nl_get_local_id(msk, skc);
}
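
/* Track retransmission periods that make no progress: the staleness
 * counter grows while the last-ACK timestamp stands still, and is reset
 * (re-activating the subflow) as soon as it moves again.
 */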
void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 rcv_tstamp = READ_ONCE(tcp_sk(ssk)->rcv_tstamp);

	/* keep track of rtx periods with no progress */
	if (!subflow->stale_count) {
		subflow->stale_rcv_tstamp = rcv_tstamp;
		subflow->stale_count++;
	} else if (subflow->stale_rcv_tstamp == rcv_tstamp) {
		if (subflow->stale_count < U8_MAX)
			subflow->stale_count++;
		mptcp_pm_nl_subflow_chk_stale(msk, ssk);
	} else {
		subflow->stale_count = 0;
		mptcp_subflow_set_active(subflow);
	}
}
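
/* Reset all per-connection PM state to its defaults, then let the
 * netlink PM re-initialize its own data.
 */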
void mptcp_pm_data_reset(struct mptcp_sock *msk)
{
	msk->pm.add_addr_signaled = 0;
	msk->pm.add_addr_accepted = 0;
	msk->pm.local_addr_used = 0;
	msk->pm.subflows = 0;
	msk->pm.rm_list_tx.nr = 0;
	msk->pm.rm_list_rx.nr = 0;
	WRITE_ONCE(msk->pm.work_pending, false);
	WRITE_ONCE(msk->pm.addr_signal, 0);
	WRITE_ONCE(msk->pm.accept_addr, false);
	WRITE_ONCE(msk->pm.accept_subflow, false);
	WRITE_ONCE(msk->pm.remote_deny_join_id0, false);
	msk->pm.status = 0;
	bitmap_fill(msk->pm.id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);

	mptcp_pm_nl_data_init(msk);
}
void mptcp_pm_data_init(struct mptcp_sock *msk)
{
	spin_lock_init(&msk->pm.lock);
	INIT_LIST_HEAD(&msk->pm.anno_list);
	mptcp_pm_data_reset(msk);
}
void __init mptcp_pm_init(void)
{
	mptcp_pm_nl_init();
}