mptcp: drop *_max fields in mptcp_pm_data
net/mptcp/pm.c
// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2019, Intel Corporation.
 */
#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include "protocol.h"

/* path manager command handlers */

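/* Queue an ADD_ADDR (or its echo) to be carried by the next outgoing
 * option; fails if another address signal is already pending.
 */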
int mptcp_pm_announce_addr(struct mptcp_sock *msk,
			   const struct mptcp_addr_info *addr,
			   bool echo, bool port)
{
	u8 add_addr = READ_ONCE(msk->pm.addr_signal);

	pr_debug("msk=%p, local_id=%d", msk, addr->id);

	if (add_addr) {
		pr_warn("addr_signal error, add_addr=%d", add_addr);
		return -EINVAL;
	}

	msk->pm.local = *addr;
	add_addr |= BIT(MPTCP_ADD_ADDR_SIGNAL);
	if (echo)
		add_addr |= BIT(MPTCP_ADD_ADDR_ECHO);
	if (addr->family == AF_INET6)
		add_addr |= BIT(MPTCP_ADD_ADDR_IPV6);
	if (port)
		add_addr |= BIT(MPTCP_ADD_ADDR_PORT);
	WRITE_ONCE(msk->pm.addr_signal, add_addr);
	return 0;
}

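/* Queue an RM_ADDR for the given local address id, to be carried by the
 * next outgoing option; fails if another address signal is already pending.
 */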
int mptcp_pm_remove_addr(struct mptcp_sock *msk, u8 local_id)
{
	u8 rm_addr = READ_ONCE(msk->pm.addr_signal);

	pr_debug("msk=%p, local_id=%d", msk, local_id);

	if (rm_addr) {
		pr_warn("addr_signal error, rm_addr=%d", rm_addr);
		return -EINVAL;
	}

	msk->pm.rm_id = local_id;
	rm_addr |= BIT(MPTCP_RM_ADDR_SIGNAL);
	WRITE_ONCE(msk->pm.addr_signal, rm_addr);
	return 0;
}

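/* Tear down the subflow(s) bound to the given local address id, under the
 * PM lock.
 */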
int mptcp_pm_remove_subflow(struct mptcp_sock *msk, u8 local_id)
{
	pr_debug("msk=%p, local_id=%d", msk, local_id);

	spin_lock_bh(&msk->pm.lock);
	mptcp_pm_nl_rm_subflow_received(msk, local_id);
	spin_unlock_bh(&msk->pm.lock);
	return 0;
}

/* path manager event handlers */

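/* Record, at connection creation time, whether this peer is the server
 * side of the MPTCP connection.
 */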
void mptcp_pm_new_connection(struct mptcp_sock *msk, int server_side)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);

	WRITE_ONCE(pm->server_side, server_side);
}

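/* Decide whether an incoming MP_JOIN can be accepted: check accept_subflow
 * locklessly first, then re-check and update the subflow accounting under
 * the PM lock, clearing accept_subflow once the limit is reached.
 */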
bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;
	unsigned int subflows_max;
	bool ret = false;

	subflows_max = mptcp_pm_get_subflows_max(msk);

	pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
		 subflows_max, READ_ONCE(pm->accept_subflow));

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->accept_subflow))
		return false;

	spin_lock_bh(&pm->lock);
	if (READ_ONCE(pm->accept_subflow)) {
		ret = pm->subflows < subflows_max;
		if (ret && ++pm->subflows == subflows_max)
			WRITE_ONCE(pm->accept_subflow, false);
	}
	spin_unlock_bh(&pm->lock);

	return ret;
}

/* return true if the new status bit is currently cleared, that is, this event
 * can be served, eventually by an already scheduled work
 */
static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
				   enum mptcp_pm_status new_status)
{
	pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
		 BIT(new_status));
	if (msk->pm.status & BIT(new_status))
		return false;

	msk->pm.status |= BIT(new_status);
	mptcp_schedule_work((struct sock *)msk);
	return true;
}

void mptcp_pm_fully_established(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	/* mptcp_pm_fully_established() can be invoked by multiple
	 * racing paths - accept() and check_fully_established() -
	 * so be sure to serve this event only once.
	 */
	if (READ_ONCE(pm->work_pending) &&
	    !(msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)))
		mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);
	msk->pm.status |= BIT(MPTCP_PM_ALREADY_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_connection_closed(struct mptcp_sock *msk)
{
	pr_debug("msk=%p", msk);
}

void mptcp_pm_subflow_established(struct mptcp_sock *msk,
				  struct mptcp_subflow_context *subflow)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	if (READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_subflow_closed(struct mptcp_sock *msk, u8 id)
{
	pr_debug("msk=%p", msk);
}

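/* Handle an incoming ADD_ADDR: when the local PM is not accepting new
 * addresses, just echo it back to the peer; otherwise record the remote
 * address and defer the handling to the worker.
 */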
void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
				const struct mptcp_addr_info *addr)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
		 READ_ONCE(pm->accept_addr));

	spin_lock_bh(&pm->lock);

	if (!READ_ONCE(pm->accept_addr)) {
		mptcp_pm_announce_addr(msk, addr, true, addr->port);
		mptcp_pm_add_addr_send_ack(msk);
	} else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
		pm->remote = *addr;
	}

	spin_unlock_bh(&pm->lock);
}

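/* Schedule a dedicated ack carrying the pending ADD_ADDR; only the IPv6 and
 * port-bearing variants need this, as the larger option may not fit in the
 * option space of a regular packet.
 */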
void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk)
{
	if (!mptcp_pm_should_add_signal_ipv6(msk) &&
	    !mptcp_pm_should_add_signal_port(msk))
		return;

	mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_SEND_ACK);
}

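/* Handle an incoming RM_ADDR: record the id to be removed and defer the
 * actual teardown to the worker.
 */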
void mptcp_pm_rm_addr_received(struct mptcp_sock *msk, u8 rm_id)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p remote_id=%d", msk, rm_id);

	spin_lock_bh(&pm->lock);
	mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED);
	pm->rm_id = rm_id;
	spin_unlock_bh(&pm->lock);
}

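/* Update the subflow backup flag according to an incoming MP_PRIO option. */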
void mptcp_pm_mp_prio_received(struct sock *sk, u8 bkup)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow->backup=%d, bkup=%d\n", subflow->backup, bkup);
	subflow->backup = bkup;
}

/* path manager helpers */

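/* Called from the option-writing path: fetch the pending ADD_ADDR, if any,
 * and clear the signal. Returns false when nothing is pending or when the
 * remaining TCP option space is too small for the announcement.
 */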
bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
			      struct mptcp_addr_info *saddr, bool *echo, bool *port)
{
	bool ret = false;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_add_signal(msk))
		goto out_unlock;

	*echo = mptcp_pm_should_add_signal_echo(msk);
	*port = mptcp_pm_should_add_signal_port(msk);

	if (remaining < mptcp_add_addr_len(msk->pm.local.family, *echo, *port))
		goto out_unlock;

	*saddr = msk->pm.local;
	WRITE_ONCE(msk->pm.addr_signal, 0);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

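/* Like mptcp_pm_add_addr_signal(), but for a pending RM_ADDR. */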
bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
			     u8 *rm_id)
{
	bool ret = false;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_rm_signal(msk))
		goto out_unlock;

	if (remaining < TCPOLEN_MPTCP_RM_ADDR_BASE)
		goto out_unlock;

	*rm_id = msk->pm.rm_id;
	WRITE_ONCE(msk->pm.addr_signal, 0);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
{
	return mptcp_pm_nl_get_local_id(msk, skc);
}

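/* Initialize the per-connection path manager state; netlink PM specific
 * state is set up by mptcp_pm_nl_data_init().
 */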
void mptcp_pm_data_init(struct mptcp_sock *msk)
{
	msk->pm.add_addr_signaled = 0;
	msk->pm.add_addr_accepted = 0;
	msk->pm.local_addr_used = 0;
	msk->pm.subflows = 0;
	msk->pm.rm_id = 0;
	WRITE_ONCE(msk->pm.work_pending, false);
	WRITE_ONCE(msk->pm.addr_signal, 0);
	WRITE_ONCE(msk->pm.accept_addr, false);
	WRITE_ONCE(msk->pm.accept_subflow, false);
	msk->pm.status = 0;

	spin_lock_init(&msk->pm.lock);
	INIT_LIST_HEAD(&msk->pm.anno_list);

	mptcp_pm_nl_data_init(msk);
}

void __init mptcp_pm_init(void)
{
	mptcp_pm_nl_init();
}