// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2019, Intel Corporation.
 */
#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include "protocol.h"

/* path manager command handlers */

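/* Pending ADD_ADDR/RM_ADDR state is tracked in msk->pm.addr_signal as a
 * bitmask (MPTCP_ADD_ADDR_SIGNAL, MPTCP_ADD_ADDR_ECHO, MPTCP_ADD_ADDR_IPV6,
 * MPTCP_ADD_ADDR_PORT, MPTCP_RM_ADDR_SIGNAL). Writers must hold pm.lock;
 * lockless readers peek at it via READ_ONCE() before taking the lock.
 */
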
int mptcp_pm_announce_addr(struct mptcp_sock *msk,
			   const struct mptcp_addr_info *addr,
			   bool echo)
{
	u8 add_addr = READ_ONCE(msk->pm.addr_signal);

	pr_debug("msk=%p, local_id=%d, echo=%d", msk, addr->id, echo);

	lockdep_assert_held(&msk->pm.lock);

	if (add_addr &
	    (echo ? BIT(MPTCP_ADD_ADDR_ECHO) : BIT(MPTCP_ADD_ADDR_SIGNAL))) {
		pr_warn("addr_signal error, add_addr=%d, echo=%d", add_addr, echo);
		return -EINVAL;
	}

	if (echo) {
		msk->pm.remote = *addr;
		add_addr |= BIT(MPTCP_ADD_ADDR_ECHO);
	} else {
		msk->pm.local = *addr;
		add_addr |= BIT(MPTCP_ADD_ADDR_SIGNAL);
	}
	if (addr->family == AF_INET6)
		add_addr |= BIT(MPTCP_ADD_ADDR_IPV6);
	if (addr->port)
		add_addr |= BIT(MPTCP_ADD_ADDR_PORT);
	WRITE_ONCE(msk->pm.addr_signal, add_addr);
	return 0;
}

int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list)
{
	u8 rm_addr = READ_ONCE(msk->pm.addr_signal);

	pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);

	if (rm_addr) {
		pr_warn("addr_signal error, rm_addr=%d", rm_addr);
		return -EINVAL;
	}

	msk->pm.rm_list_tx = *rm_list;
	rm_addr |= BIT(MPTCP_RM_ADDR_SIGNAL);
	WRITE_ONCE(msk->pm.addr_signal, rm_addr);
	mptcp_pm_nl_addr_send_ack(msk);
	return 0;
}

int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list)
{
	pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);

	spin_lock_bh(&msk->pm.lock);
	mptcp_pm_nl_rm_subflow_received(msk, rm_list);
	spin_unlock_bh(&msk->pm.lock);
	return 0;
}

/* path manager event handlers */

void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);

	WRITE_ONCE(pm->server_side, server_side);
	mptcp_event(MPTCP_EVENT_CREATED, msk, ssk, GFP_ATOMIC);
}

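/* Counts the accepted subflows against the configured limit: once
 * subflows_max is reached, accept_subflow is cleared so that later lockless
 * peeks fail fast without taking pm.lock.
 */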
bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;
	unsigned int subflows_max;
	int ret = 0;

	subflows_max = mptcp_pm_get_subflows_max(msk);

	pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
		 subflows_max, READ_ONCE(pm->accept_subflow));

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->accept_subflow))
		return false;

	spin_lock_bh(&pm->lock);
	if (READ_ONCE(pm->accept_subflow)) {
		ret = pm->subflows < subflows_max;
		if (ret && ++pm->subflows == subflows_max)
			WRITE_ONCE(pm->accept_subflow, false);
	}
	spin_unlock_bh(&pm->lock);

	return ret;
}

/* return true if the new status bit is currently cleared, that is, this event
 * can be served, possibly by an already scheduled work item
 */
static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
				   enum mptcp_pm_status new_status)
{
	pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
		 BIT(new_status));
	if (msk->pm.status & BIT(new_status))
		return false;

	msk->pm.status |= BIT(new_status);
	mptcp_schedule_work((struct sock *)msk);
	return true;
}

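/* The status bits set above are serviced asynchronously by the msk worker,
 * which clears each bit as the corresponding pending event is handled.
 */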
void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk, gfp_t gfp)
{
	struct mptcp_pm_data *pm = &msk->pm;
	bool announce = false;

	pr_debug("msk=%p", msk);

	spin_lock_bh(&pm->lock);

	/* mptcp_pm_fully_established() can be invoked by multiple
	 * racing paths - accept() and check_fully_established();
	 * be sure to serve this event only once.
	 */
	if (READ_ONCE(pm->work_pending) &&
	    !(msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)))
		mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);

	if ((msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)) == 0)
		announce = true;

	msk->pm.status |= BIT(MPTCP_PM_ALREADY_ESTABLISHED);
	spin_unlock_bh(&pm->lock);

	if (announce)
		mptcp_event(MPTCP_EVENT_ESTABLISHED, msk, ssk, gfp);
}

void mptcp_pm_connection_closed(struct mptcp_sock *msk)
{
	pr_debug("msk=%p", msk);
}

void mptcp_pm_subflow_established(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	if (READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_subflow_closed(struct mptcp_sock *msk, u8 id)
{
	pr_debug("msk=%p", msk);
}

void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
				const struct mptcp_addr_info *addr)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
		 READ_ONCE(pm->accept_addr));

	mptcp_event_addr_announced(msk, addr);

	spin_lock_bh(&pm->lock);

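	/* when accept_addr is cleared, the address is only echoed back to
	 * the peer; otherwise the worker is scheduled to act on it
	 */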
	if (!READ_ONCE(pm->accept_addr)) {
		mptcp_pm_announce_addr(msk, addr, true);
		mptcp_pm_add_addr_send_ack(msk);
	} else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
		pm->remote = *addr;
	}

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk,
			      struct mptcp_addr_info *addr)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	spin_lock_bh(&pm->lock);

	if (mptcp_lookup_anno_list_by_saddr(msk, addr) && READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk)
{
	if (!mptcp_pm_should_add_signal(msk))
		return;

	mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_SEND_ACK);
}

void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
			       const struct mptcp_rm_list *rm_list)
{
	struct mptcp_pm_data *pm = &msk->pm;
	u8 i;

	pr_debug("msk=%p remote_ids_nr=%d", msk, rm_list->nr);

	for (i = 0; i < rm_list->nr; i++)
		mptcp_event_addr_removed(msk, rm_list->ids[i]);

	spin_lock_bh(&pm->lock);
	mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED);
	pm->rm_list_rx = *rm_list;
	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_mp_prio_received(struct sock *sk, u8 bkup)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow->backup=%d, bkup=%d\n", subflow->backup, bkup);
	subflow->backup = bkup;

	mptcp_event(MPTCP_EVENT_SUB_PRIORITY, mptcp_sk(subflow->conn), sk, GFP_ATOMIC);
}

/* path manager helpers */

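/* The *_signal() helpers below run from the TCP option-writing path: they
 * re-check the pending signal under pm.lock, copy the relevant data out for
 * the option writer and clear the corresponding addr_signal bit on success.
 */
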
bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, struct sk_buff *skb,
			      unsigned int opt_size, unsigned int remaining,
			      struct mptcp_addr_info *saddr, bool *echo,
			      bool *port, bool *drop_other_suboptions)
{
	int ret = false;
	u8 add_addr;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_add_signal(msk))
		goto out_unlock;

	/* always drop all other options for a pure-ack ADD_ADDR; this is a
	 * plain dup-ack from the TCP perspective. The other MPTCP-relevant
	 * info, if any, will be carried by the 'original' TCP ack
	 */
	if (skb && skb_is_tcp_pure_ack(skb)) {
		remaining += opt_size;
		*drop_other_suboptions = true;
	}

	*echo = mptcp_pm_should_add_signal_echo(msk);
	*port = mptcp_pm_should_add_signal_port(msk);

	if (remaining < mptcp_add_addr_len(msk->pm.local.family, *echo, *port))
		goto out_unlock;

	*saddr = msk->pm.local;
	if (*echo)
		add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_ECHO);
	else
		add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_SIGNAL);
	WRITE_ONCE(msk->pm.addr_signal, add_addr);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
			     struct mptcp_rm_list *rm_list)
{
	int ret = false, len;
	u8 rm_addr;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_rm_signal(msk))
		goto out_unlock;

	rm_addr = msk->pm.addr_signal & ~BIT(MPTCP_RM_ADDR_SIGNAL);
	len = mptcp_rm_addr_len(&msk->pm.rm_list_tx);
	if (len < 0) {
		WRITE_ONCE(msk->pm.addr_signal, rm_addr);
		goto out_unlock;
	}
	if (remaining < len)
		goto out_unlock;

	*rm_list = msk->pm.rm_list_tx;
	WRITE_ONCE(msk->pm.addr_signal, rm_addr);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
{
	return mptcp_pm_nl_get_local_id(msk, skc);
}

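/* A subflow is considered stale when its rcv_tstamp does not move across
 * consecutive retransmission periods; stale_count tracks how many such
 * periods elapsed without progress.
 */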
void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 rcv_tstamp = READ_ONCE(tcp_sk(ssk)->rcv_tstamp);

	/* keep track of rtx periods with no progress */
	if (!subflow->stale_count) {
		subflow->stale_rcv_tstamp = rcv_tstamp;
		subflow->stale_count++;
	} else if (subflow->stale_rcv_tstamp == rcv_tstamp) {
		if (subflow->stale_count < U8_MAX)
			subflow->stale_count++;
		mptcp_pm_nl_subflow_chk_stale(msk, ssk);
	} else {
		subflow->stale_count = 0;
		mptcp_subflow_set_active(subflow);
	}
}

void mptcp_pm_data_init(struct mptcp_sock *msk)
{
	msk->pm.add_addr_signaled = 0;
	msk->pm.add_addr_accepted = 0;
	msk->pm.local_addr_used = 0;
	msk->pm.subflows = 0;
	msk->pm.rm_list_tx.nr = 0;
	msk->pm.rm_list_rx.nr = 0;
	WRITE_ONCE(msk->pm.work_pending, false);
	WRITE_ONCE(msk->pm.addr_signal, 0);
	WRITE_ONCE(msk->pm.accept_addr, false);
	WRITE_ONCE(msk->pm.accept_subflow, false);
	WRITE_ONCE(msk->pm.remote_deny_join_id0, false);
	msk->pm.status = 0;

	spin_lock_init(&msk->pm.lock);
	INIT_LIST_HEAD(&msk->pm.anno_list);

	mptcp_pm_nl_data_init(msk);
}

void __init mptcp_pm_init(void)
{
	mptcp_pm_nl_init();
}