// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2019, Intel Corporation.
 */
#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include "protocol.h"
/* path manager command handlers */
15 int mptcp_pm_announce_addr(struct mptcp_sock *msk,
16 const struct mptcp_addr_info *addr,
19 u8 add_addr = READ_ONCE(msk->pm.addr_signal);
21 pr_debug("msk=%p, local_id=%d", msk, addr->id);
24 pr_warn("addr_signal error, add_addr=%d", add_addr);
28 msk->pm.local = *addr;
29 add_addr |= BIT(MPTCP_ADD_ADDR_SIGNAL);
31 add_addr |= BIT(MPTCP_ADD_ADDR_ECHO);
32 if (addr->family == AF_INET6)
33 add_addr |= BIT(MPTCP_ADD_ADDR_IPV6);
35 add_addr |= BIT(MPTCP_ADD_ADDR_PORT);
36 WRITE_ONCE(msk->pm.addr_signal, add_addr);
40 int mptcp_pm_remove_addr(struct mptcp_sock *msk, u8 local_id)
42 u8 rm_addr = READ_ONCE(msk->pm.addr_signal);
44 pr_debug("msk=%p, local_id=%d", msk, local_id);
47 pr_warn("addr_signal error, rm_addr=%d", rm_addr);
51 msk->pm.rm_id = local_id;
52 rm_addr |= BIT(MPTCP_RM_ADDR_SIGNAL);
53 WRITE_ONCE(msk->pm.addr_signal, rm_addr);
57 int mptcp_pm_remove_subflow(struct mptcp_sock *msk, u8 local_id)
59 pr_debug("msk=%p, local_id=%d", msk, local_id);
61 spin_lock_bh(&msk->pm.lock);
62 mptcp_pm_nl_rm_subflow_received(msk, local_id);
63 spin_unlock_bh(&msk->pm.lock);
/* path manager event handlers */
69 void mptcp_pm_new_connection(struct mptcp_sock *msk, int server_side)
71 struct mptcp_pm_data *pm = &msk->pm;
73 pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);
75 WRITE_ONCE(pm->server_side, server_side);
78 bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
80 struct mptcp_pm_data *pm = &msk->pm;
81 unsigned int subflows_max;
84 subflows_max = mptcp_pm_get_subflows_max(msk);
86 pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
87 subflows_max, READ_ONCE(pm->accept_subflow));
89 /* try to avoid acquiring the lock below */
90 if (!READ_ONCE(pm->accept_subflow))
93 spin_lock_bh(&pm->lock);
94 if (READ_ONCE(pm->accept_subflow)) {
95 ret = pm->subflows < subflows_max;
96 if (ret && ++pm->subflows == subflows_max)
97 WRITE_ONCE(pm->accept_subflow, false);
99 spin_unlock_bh(&pm->lock);
104 /* return true if the new status bit is currently cleared, that is, this event
105 * can be server, eventually by an already scheduled work
107 static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
108 enum mptcp_pm_status new_status)
110 pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
112 if (msk->pm.status & BIT(new_status))
115 msk->pm.status |= BIT(new_status);
116 mptcp_schedule_work((struct sock *)msk);
120 void mptcp_pm_fully_established(struct mptcp_sock *msk)
122 struct mptcp_pm_data *pm = &msk->pm;
124 pr_debug("msk=%p", msk);
126 /* try to avoid acquiring the lock below */
127 if (!READ_ONCE(pm->work_pending))
130 spin_lock_bh(&pm->lock);
132 /* mptcp_pm_fully_established() can be invoked by multiple
133 * racing paths - accept() and check_fully_established()
134 * be sure to serve this event only once.
136 if (READ_ONCE(pm->work_pending) &&
137 !(msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)))
138 mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);
139 msk->pm.status |= BIT(MPTCP_PM_ALREADY_ESTABLISHED);
141 spin_unlock_bh(&pm->lock);
144 void mptcp_pm_connection_closed(struct mptcp_sock *msk)
146 pr_debug("msk=%p", msk);
149 void mptcp_pm_subflow_established(struct mptcp_sock *msk,
150 struct mptcp_subflow_context *subflow)
152 struct mptcp_pm_data *pm = &msk->pm;
154 pr_debug("msk=%p", msk);
156 if (!READ_ONCE(pm->work_pending))
159 spin_lock_bh(&pm->lock);
161 if (READ_ONCE(pm->work_pending))
162 mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);
164 spin_unlock_bh(&pm->lock);
167 void mptcp_pm_subflow_closed(struct mptcp_sock *msk, u8 id)
169 pr_debug("msk=%p", msk);
172 void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
173 const struct mptcp_addr_info *addr)
175 struct mptcp_pm_data *pm = &msk->pm;
177 pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
178 READ_ONCE(pm->accept_addr));
180 spin_lock_bh(&pm->lock);
182 if (!READ_ONCE(pm->accept_addr)) {
183 mptcp_pm_announce_addr(msk, addr, true, addr->port);
184 mptcp_pm_add_addr_send_ack(msk);
185 } else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
189 spin_unlock_bh(&pm->lock);
192 void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk)
194 if (!mptcp_pm_should_add_signal_ipv6(msk) &&
195 !mptcp_pm_should_add_signal_port(msk))
198 mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_SEND_ACK);
201 void mptcp_pm_rm_addr_received(struct mptcp_sock *msk, u8 rm_id)
203 struct mptcp_pm_data *pm = &msk->pm;
205 pr_debug("msk=%p remote_id=%d", msk, rm_id);
207 spin_lock_bh(&pm->lock);
208 mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED);
210 spin_unlock_bh(&pm->lock);
213 void mptcp_pm_mp_prio_received(struct sock *sk, u8 bkup)
215 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
217 pr_debug("subflow->backup=%d, bkup=%d\n", subflow->backup, bkup);
218 subflow->backup = bkup;
/* path manager helpers */
223 bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
224 struct mptcp_addr_info *saddr, bool *echo, bool *port)
228 spin_lock_bh(&msk->pm.lock);
230 /* double check after the lock is acquired */
231 if (!mptcp_pm_should_add_signal(msk))
234 *echo = mptcp_pm_should_add_signal_echo(msk);
235 *port = mptcp_pm_should_add_signal_port(msk);
237 if (remaining < mptcp_add_addr_len(msk->pm.local.family, *echo, *port))
240 *saddr = msk->pm.local;
241 WRITE_ONCE(msk->pm.addr_signal, 0);
245 spin_unlock_bh(&msk->pm.lock);
249 bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
254 spin_lock_bh(&msk->pm.lock);
256 /* double check after the lock is acquired */
257 if (!mptcp_pm_should_rm_signal(msk))
260 if (remaining < TCPOLEN_MPTCP_RM_ADDR_BASE)
263 *rm_id = msk->pm.rm_id;
264 WRITE_ONCE(msk->pm.addr_signal, 0);
268 spin_unlock_bh(&msk->pm.lock);
272 int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
274 return mptcp_pm_nl_get_local_id(msk, skc);
277 void mptcp_pm_data_init(struct mptcp_sock *msk)
279 msk->pm.add_addr_signaled = 0;
280 msk->pm.add_addr_accepted = 0;
281 msk->pm.local_addr_used = 0;
282 msk->pm.subflows = 0;
284 WRITE_ONCE(msk->pm.work_pending, false);
285 WRITE_ONCE(msk->pm.addr_signal, 0);
286 WRITE_ONCE(msk->pm.accept_addr, false);
287 WRITE_ONCE(msk->pm.accept_subflow, false);
290 spin_lock_init(&msk->pm.lock);
291 INIT_LIST_HEAD(&msk->pm.anno_list);
293 mptcp_pm_nl_data_init(msk);
296 void __init mptcp_pm_init(void)