cifsd: braces {} should be used on all arms of this statement
[linux-2.6-microblaze.git] / fs / cifsd / connection.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *   Copyright (C) 2016 Namjae Jeon <namjae.jeon@protocolfreedom.org>
4  *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
5  */
6
7 #include <linux/mutex.h>
8 #include <linux/freezer.h>
9 #include <linux/module.h>
10
11 #include "server.h"
12 #include "buffer_pool.h"
13 #include "smb_common.h"
14 #include "mgmt/ksmbd_ida.h"
15 #include "connection.h"
16 #include "transport_tcp.h"
17 #include "transport_rdma.h"
18
19 static DEFINE_MUTEX(init_lock);
20
21 static struct ksmbd_conn_ops default_conn_ops;
22
23 static LIST_HEAD(conn_list);
24 static DEFINE_RWLOCK(conn_list_lock);
25
/**
 * ksmbd_conn_free() - free resources of the connection instance
 *
 * @conn:	connection instance to be cleaned up
 *
 * During the thread termination, the corresponding conn instance
 * resources(sock/memory) are released and finally the conn object is freed.
 */
void ksmbd_conn_free(struct ksmbd_conn *conn)
{
	/* Unlink from the global list first so no other thread can find
	 * this connection once we start releasing its memory. */
	write_lock(&conn_list_lock);
	list_del(&conn->conns_list);
	write_unlock(&conn_list_lock);

	/* request_buf is kvmalloc'ed in the handler loop, hence kvfree. */
	kvfree(conn->request_buf);
	kfree(conn->preauth_info);
	kfree(conn);
}
44
/**
 * ksmbd_conn_alloc() - initialize a new connection instance
 *
 * Allocates a zeroed ksmbd_conn, initializes its lists, locks, counters
 * and wait queue, and links it onto the global connection list.
 *
 * Return:	ksmbd_conn struct on success, otherwise NULL
 */
struct ksmbd_conn *ksmbd_conn_alloc(void)
{
	struct ksmbd_conn *conn;

	conn = kzalloc(sizeof(struct ksmbd_conn), GFP_KERNEL);
	if (!conn)
		return NULL;

	conn->need_neg = true;
	conn->status = KSMBD_SESS_NEW;
	/* Prefer UTF-8 for name conversion; fall back to the kernel's
	 * default NLS table if the utf8 module is unavailable. */
	conn->local_nls = load_nls("utf8");
	if (!conn->local_nls)
		conn->local_nls = load_nls_default();
	atomic_set(&conn->req_running, 0);
	atomic_set(&conn->r_count, 0);
	init_waitqueue_head(&conn->req_running_q);
	INIT_LIST_HEAD(&conn->conns_list);
	INIT_LIST_HEAD(&conn->sessions);
	INIT_LIST_HEAD(&conn->requests);
	INIT_LIST_HEAD(&conn->async_requests);
	spin_lock_init(&conn->request_lock);
	spin_lock_init(&conn->credits_lock);
	ida_init(&conn->async_ida);

	/* Publish the fully initialized connection on the global list
	 * only after all fields above are set up. */
	write_lock(&conn_list_lock);
	list_add(&conn->conns_list, &conn_list);
	write_unlock(&conn_list_lock);
	return conn;
}
79
80 bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c)
81 {
82         struct ksmbd_conn *t;
83         bool ret = false;
84
85         read_lock(&conn_list_lock);
86         list_for_each_entry(t, &conn_list, conns_list) {
87                 if (memcmp(t->ClientGUID, c->ClientGUID, SMB2_CLIENT_GUID_SIZE))
88                         continue;
89
90                 ret = true;
91                 break;
92         }
93         read_unlock(&conn_list_lock);
94         return ret;
95 }
96
97 void ksmbd_conn_enqueue_request(struct ksmbd_work *work)
98 {
99         struct ksmbd_conn *conn = work->conn;
100         struct list_head *requests_queue = NULL;
101
102         if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE) {
103                 requests_queue = &conn->requests;
104                 work->syncronous = true;
105         }
106
107         if (requests_queue) {
108                 atomic_inc(&conn->req_running);
109                 spin_lock(&conn->request_lock);
110                 list_add_tail(&work->request_entry, requests_queue);
111                 spin_unlock(&conn->request_lock);
112         }
113 }
114
/*
 * ksmbd_conn_try_dequeue_request() - remove a finished request from the
 * connection's tracking lists.
 *
 * Return: 0 if the work was unlinked (or was never queued), 1 if it must
 * stay queued because a multi-part response (multiRsp) is still pending.
 */
int ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)
{
	struct ksmbd_conn *conn = work->conn;
	int ret = 1;

	/* Never enqueued (e.g. SMB2 CANCEL) - nothing to do.
	 * NOTE(review): these list_empty() checks run without
	 * request_lock held - presumably safe because only this
	 * worker touches its own work entries; verify. */
	if (list_empty(&work->request_entry) &&
	    list_empty(&work->async_request_entry))
		return 0;

	atomic_dec(&conn->req_running);
	spin_lock(&conn->request_lock);
	if (!work->multiRsp) {
		list_del_init(&work->request_entry);
		/* async entries are only linked for async (non-sync) work */
		if (work->syncronous == false)
			list_del_init(&work->async_request_entry);
		ret = 0;
	}
	spin_unlock(&conn->request_lock);

	/* Wake anyone in ksmbd_conn_wait_idle() watching req_running. */
	wake_up_all(&conn->req_running_q);
	return ret;
}
137
/* Serialize writers on this connection's transport (see ksmbd_conn_write()). */
static void ksmbd_conn_lock(struct ksmbd_conn *conn)
{
	mutex_lock(&conn->srv_mutex);
}
142
/* Counterpart of ksmbd_conn_lock(). */
static void ksmbd_conn_unlock(struct ksmbd_conn *conn)
{
	mutex_unlock(&conn->srv_mutex);
}
147
/*
 * Block until at most one request (the caller's own) is running on @conn.
 * Woken by ksmbd_conn_try_dequeue_request() via req_running_q.
 */
void ksmbd_conn_wait_idle(struct ksmbd_conn *conn)
{
	wait_event(conn->req_running_q, atomic_read(&conn->req_running) < 2);
}
152
/*
 * ksmbd_conn_write() - send the response for @work over its connection.
 *
 * Builds up to three kvecs: an optional SMB2 transform header (encrypted
 * responses), the response header, and an optional aux payload (e.g. read
 * data), then hands them to the transport under the per-connection mutex.
 *
 * Return: 0 on success, negative errno from the transport on failure.
 */
int ksmbd_conn_write(struct ksmbd_work *work)
{
	struct ksmbd_conn *conn = work->conn;
	struct smb_hdr *rsp_hdr = work->response_buf;
	size_t len = 0;
	int sent;
	struct kvec iov[3];
	int iov_idx = 0;

	ksmbd_conn_try_dequeue_request(work);
	if (!rsp_hdr) {
		ksmbd_err("NULL response header\n");
		return -EINVAL;
	}

	/* Encrypted response: transform header goes on the wire first. */
	if (work->tr_buf) {
		iov[iov_idx] = (struct kvec) { work->tr_buf,
				sizeof(struct smb2_transform_hdr) };
		len += iov[iov_idx++].iov_len;
	}

	if (work->aux_payload_sz) {
		/* Header and aux payload live in separate buffers. */
		iov[iov_idx] = (struct kvec) { rsp_hdr, work->resp_hdr_sz };
		len += iov[iov_idx++].iov_len;
		iov[iov_idx] = (struct kvec) { work->aux_payload_buf, work->aux_payload_sz };
		len += iov[iov_idx++].iov_len;
	} else {
		/* Single buffer: when encrypted, resp_hdr_sz already covers
		 * the payload; otherwise derive length from the RFC1002
		 * header (+4 for the length field itself). */
		if (work->tr_buf)
			iov[iov_idx].iov_len = work->resp_hdr_sz;
		else
			iov[iov_idx].iov_len = get_rfc1002_len(rsp_hdr) + 4;
		iov[iov_idx].iov_base = rsp_hdr;
		len += iov[iov_idx++].iov_len;
	}

	/* srv_mutex keeps concurrent workers from interleaving writes. */
	ksmbd_conn_lock(conn);
	sent = conn->transport->ops->writev(conn->transport, &iov[0],
					iov_idx, len,
					work->need_invalidate_rkey,
					work->remote_key);
	ksmbd_conn_unlock(conn);

	if (sent < 0) {
		ksmbd_err("Failed to send message: %d\n", sent);
		return sent;
	}

	return 0;
}
202
203 int ksmbd_conn_rdma_read(struct ksmbd_conn *conn, void *buf,
204                 unsigned int buflen, u32 remote_key, u64 remote_offset,
205                 u32 remote_len)
206 {
207         int ret = -EINVAL;
208
209         if (conn->transport->ops->rdma_read)
210                 ret = conn->transport->ops->rdma_read(conn->transport,
211                                                 buf, buflen,
212                                                 remote_key, remote_offset,
213                                                 remote_len);
214         return ret;
215 }
216
217 int ksmbd_conn_rdma_write(struct ksmbd_conn *conn, void *buf,
218                 unsigned int buflen, u32 remote_key, u64 remote_offset,
219                 u32 remote_len)
220 {
221         int ret = -EINVAL;
222
223         if (conn->transport->ops->rdma_write)
224                 ret = conn->transport->ops->rdma_write(conn->transport,
225                                                 buf, buflen,
226                                                 remote_key, remote_offset,
227                                                 remote_len);
228         return ret;
229 }
230
/*
 * ksmbd_conn_alive() - should the handler loop keep servicing @conn?
 *
 * Return: false when the server is shutting down, the session is exiting,
 * the kthread was asked to stop, or the connection idled past the
 * configured deadtime with no open files; true otherwise.
 */
bool ksmbd_conn_alive(struct ksmbd_conn *conn)
{
	if (!ksmbd_server_running())
		return false;

	if (conn->status == KSMBD_SESS_EXITING)
		return false;

	if (kthread_should_stop())
		return false;

	/* Never time out a connection that still has files open. */
	if (atomic_read(&conn->stats.open_files_count) > 0)
		return true;

	/*
	 * Stop the current session if the time since the last client
	 * request exceeds the user-configured deadtime and the open
	 * file count is zero.
	 * NOTE(review): deadtime is compared against jiffies here, and
	 * the message reports it scaled by SMB_ECHO_INTERVAL as
	 * "minutes" - confirm the units agree with how server_conf
	 * stores deadtime.
	 */
	if (server_conf.deadtime > 0 &&
	    time_after(jiffies, conn->last_active + server_conf.deadtime)) {
		ksmbd_debug(CONN, "No response from client in %lu minutes\n",
			server_conf.deadtime / SMB_ECHO_INTERVAL);
		return false;
	}
	return true;
}
258
259 /**
260  * ksmbd_conn_handler_loop() - session thread to listen on new smb requests
261  * @p:          connection instance
262  *
263  * One thread each per connection
264  *
265  * Return:      0 on success
266  */
267 int ksmbd_conn_handler_loop(void *p)
268 {
269         struct ksmbd_conn *conn = (struct ksmbd_conn *)p;
270         struct ksmbd_transport *t = conn->transport;
271         unsigned int pdu_size;
272         char hdr_buf[4] = {0,};
273         int size;
274
275         mutex_init(&conn->srv_mutex);
276         __module_get(THIS_MODULE);
277
278         if (t->ops->prepare && t->ops->prepare(t))
279                 goto out;
280
281         conn->last_active = jiffies;
282         while (ksmbd_conn_alive(conn)) {
283                 if (try_to_freeze())
284                         continue;
285
286                 kvfree(conn->request_buf);
287                 conn->request_buf = NULL;
288
289                 size = t->ops->read(t, hdr_buf, sizeof(hdr_buf));
290                 if (size != sizeof(hdr_buf))
291                         break;
292
293                 pdu_size = get_rfc1002_len(hdr_buf);
294                 ksmbd_debug(CONN, "RFC1002 header %u bytes\n", pdu_size);
295
296                 /* make sure we have enough to get to SMB header end */
297                 if (!ksmbd_pdu_size_has_room(pdu_size)) {
298                         ksmbd_debug(CONN, "SMB request too short (%u bytes)\n",
299                                     pdu_size);
300                         continue;
301                 }
302
303                 /* 4 for rfc1002 length field */
304                 size = pdu_size + 4;
305                 conn->request_buf = kvmalloc(size, GFP_KERNEL);
306                 if (!conn->request_buf)
307                         continue;
308
309                 memcpy(conn->request_buf, hdr_buf, sizeof(hdr_buf));
310                 if (!ksmbd_smb_request(conn))
311                         break;
312
313                 /*
314                  * We already read 4 bytes to find out PDU size, now
315                  * read in PDU
316                  */
317                 size = t->ops->read(t, conn->request_buf + 4, pdu_size);
318                 if (size < 0) {
319                         ksmbd_err("sock_read failed: %d\n", size);
320                         break;
321                 }
322
323                 if (size != pdu_size) {
324                         ksmbd_err("PDU error. Read: %d, Expected: %d\n",
325                                   size,
326                                   pdu_size);
327                         continue;
328                 }
329
330                 if (!default_conn_ops.process_fn) {
331                         ksmbd_err("No connection request callback\n");
332                         break;
333                 }
334
335                 if (default_conn_ops.process_fn(conn)) {
336                         ksmbd_err("Cannot handle request\n");
337                         break;
338                 }
339         }
340
341 out:
342         /* Wait till all reference dropped to the Server object*/
343         while (atomic_read(&conn->r_count) > 0)
344                 schedule_timeout(HZ);
345
346         unload_nls(conn->local_nls);
347         if (default_conn_ops.terminate_fn)
348                 default_conn_ops.terminate_fn(conn);
349         t->ops->disconnect(t);
350         module_put(THIS_MODULE);
351         return 0;
352 }
353
/*
 * Register the server's per-request dispatch (process_fn) and connection
 * teardown (terminate_fn) callbacks used by ksmbd_conn_handler_loop().
 */
void ksmbd_conn_init_server_callbacks(struct ksmbd_conn_ops *ops)
{
	default_conn_ops.process_fn = ops->process_fn;
	default_conn_ops.terminate_fn = ops->terminate_fn;
}
359
360 int ksmbd_conn_transport_init(void)
361 {
362         int ret;
363
364         mutex_lock(&init_lock);
365         ret = ksmbd_tcp_init();
366         if (ret) {
367                 pr_err("Failed to init TCP subsystem: %d\n", ret);
368                 goto out;
369         }
370
371         ret = ksmbd_rdma_init();
372         if (ret) {
373                 pr_err("Failed to init KSMBD subsystem: %d\n", ret);
374                 goto out;
375         }
376 out:
377         mutex_unlock(&init_lock);
378         return ret;
379 }
380
381 static void stop_sessions(void)
382 {
383         struct ksmbd_conn *conn;
384
385 again:
386         read_lock(&conn_list_lock);
387         list_for_each_entry(conn, &conn_list, conns_list) {
388                 struct task_struct *task;
389
390                 task = conn->transport->handler;
391                 if (task)
392                         ksmbd_debug(CONN, "Stop session handler %s/%d\n",
393                                   task->comm, task_pid_nr(task));
394                 conn->status = KSMBD_SESS_EXITING;
395         }
396         read_unlock(&conn_list_lock);
397
398         if (!list_empty(&conn_list)) {
399                 schedule_timeout_interruptible(HZ / 10); /* 100ms */
400                 goto again;
401         }
402 }
403
/*
 * ksmbd_conn_transport_destroy() - tear down transports, then wait for
 * all existing sessions to exit. Listeners are destroyed first so no
 * new connections arrive while stop_sessions() drains the old ones.
 */
void ksmbd_conn_transport_destroy(void)
{
	mutex_lock(&init_lock);
	ksmbd_tcp_destroy();
	ksmbd_rdma_destroy();
	stop_sessions();
	mutex_unlock(&init_lock);
}