// SPDX-License-Identifier: LGPL-2.1
/*
 *   fs/cifs/transport.c
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

void
cifs_wake_up_task(struct mid_q_entry *mid)
{
        wake_up_process(mid->callback_data);
}

struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
        struct mid_q_entry *temp;

        if (server == NULL) {
                cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
                return NULL;
        }

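        /*
         * mempool_alloc() with GFP_NOFS may sleep but will not fail, so
         * the allocation below needs no NULL check.
         */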
        temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
        memset(temp, 0, sizeof(struct mid_q_entry));
        kref_init(&temp->refcount);
        temp->mid = get_mid(smb_buffer);
        temp->pid = current->pid;
        temp->command = cpu_to_le16(smb_buffer->Command);
        cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
        /*      do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
        /* when mid allocated can be before when sent */
        temp->when_alloc = jiffies;
        temp->server = server;

        /*
         * The default is for the mid to be synchronous, so the
         * default callback just wakes up the current task.
         */
        get_task_struct(current);
        temp->creator = current;
        temp->callback = cifs_wake_up_task;
        temp->callback_data = current;

        atomic_inc(&midCount);
        temp->mid_state = MID_REQUEST_ALLOCATED;
        return temp;
}

static void _cifs_mid_q_entry_release(struct kref *refcount)
{
        struct mid_q_entry *midEntry =
                        container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
        __le16 command = midEntry->server->vals->lock_cmd;
        __u16 smb_cmd = le16_to_cpu(midEntry->command);
        unsigned long now;
        unsigned long roundtrip_time;
#endif
        struct TCP_Server_Info *server = midEntry->server;

        if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
            midEntry->mid_state == MID_RESPONSE_RECEIVED &&
            server->ops->handle_cancelled_mid)
                server->ops->handle_cancelled_mid(midEntry, server);

        midEntry->mid_state = MID_FREE;
        atomic_dec(&midCount);
        if (midEntry->large_buf)
                cifs_buf_release(midEntry->resp_buf);
        else
                cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
        now = jiffies;
        if (now < midEntry->when_alloc)
                cifs_server_dbg(VFS, "Invalid mid allocation time\n");
        roundtrip_time = now - midEntry->when_alloc;

        if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
                if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
                        server->slowest_cmd[smb_cmd] = roundtrip_time;
                        server->fastest_cmd[smb_cmd] = roundtrip_time;
                } else {
                        if (server->slowest_cmd[smb_cmd] < roundtrip_time)
                                server->slowest_cmd[smb_cmd] = roundtrip_time;
                        else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
                                server->fastest_cmd[smb_cmd] = roundtrip_time;
                }
                cifs_stats_inc(&server->num_cmds[smb_cmd]);
                server->time_per_cmd[smb_cmd] += roundtrip_time;
        }
        /*
         * Commands taking longer than one second (default) can be indications
         * that something is wrong, unless it is quite a slow link or a very
         * busy server. Note that this calculation is unlikely or impossible
         * to wrap as long as slow_rsp_threshold is not set way above the
         * recommended maximum value (32767, i.e. 9 hours) and is generally
         * harmless even if wrong since it only affects debug counters - so
         * leaving the calculation as a simple comparison rather than doing
         * multiple conversions and overflow checks.
         */
        if ((slow_rsp_threshold != 0) &&
            time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
            (midEntry->command != command)) {
                /*
                 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
                 * NB: le16_to_cpu returns unsigned so cannot be negative below
                 */
                if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
                        cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

                trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
                               midEntry->when_sent, midEntry->when_received);
                if (cifsFYI & CIFS_TIMER) {
                        pr_debug("slow rsp: cmd %d mid %llu\n",
                                 smb_cmd, midEntry->mid);
                        cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
                                  now - midEntry->when_alloc,
                                  now - midEntry->when_sent,
                                  now - midEntry->when_received);
                }
        }
#endif
        put_task_struct(midEntry->creator);

        mempool_free(midEntry, cifs_mid_poolp);
}

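/*
 * Drop a reference to the mid. The final put runs under GlobalMid_Lock
 * so it cannot race with state updates from the demultiplex thread.
 */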
void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
        spin_lock(&GlobalMid_Lock);
        kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
        spin_unlock(&GlobalMid_Lock);
}

void DeleteMidQEntry(struct mid_q_entry *midEntry)
{
        cifs_mid_q_entry_release(midEntry);
}

void
cifs_delete_mid(struct mid_q_entry *mid)
{
        spin_lock(&GlobalMid_Lock);
        if (!(mid->mid_flags & MID_DELETED)) {
                list_del_init(&mid->qhead);
                mid->mid_flags |= MID_DELETED;
        }
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:     Server to send the data to
 * @smb_msg:    Message to send
 * @sent:       amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
              size_t *sent)
{
        int rc = 0;
        int retries = 0;
        struct socket *ssocket = server->ssocket;

        *sent = 0;

        smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
        smb_msg->msg_namelen = sizeof(struct sockaddr);
        smb_msg->msg_control = NULL;
        smb_msg->msg_controllen = 0;
        if (server->noblocksnd)
                smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
        else
                smb_msg->msg_flags = MSG_NOSIGNAL;

        while (msg_data_left(smb_msg)) {
                /*
                 * If blocking send, we try 3 times, since each can block
                 * for 5 seconds. For nonblocking we have to try more,
                 * but wait increasing amounts of time allowing time for
                 * the socket to clear. The overall time we wait in either
                 * case to send on the socket is about 15 seconds.
                 * Similarly we wait for 15 seconds for a response from
                 * the server in SendReceive[2] for the server to send
                 * a response back for most types of requests (except
                 * SMB Write past end of file which can be slow, and
                 * blocking lock operations). NFS waits slightly longer
                 * than CIFS, but this can make it take longer for
                 * nonresponsive servers to be detected and 15 seconds
                 * is more than enough time for modern networks to
                 * send a packet. In most cases if we fail to send
                 * after the retries we will kill the socket and
                 * reconnect, which may clear the network problem.
                 */
                rc = sock_sendmsg(ssocket, smb_msg);
                if (rc == -EAGAIN) {
                        retries++;
                        if (retries >= 14 ||
                            (!server->noblocksnd && (retries > 2))) {
                                cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
                                         ssocket);
                                return -EAGAIN;
                        }
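                        /*
                         * Exponential backoff: 2ms, 4ms, ... up to ~8s;
                         * summed over the 13 allowed retries this is about
                         * 16 seconds, matching the warning above.
                         */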
                        msleep(1 << retries);
                        continue;
                }

                if (rc < 0)
                        return rc;

                if (rc == 0) {
                        /*
                         * Should never happen; letting the socket clear
                         * before retrying is our only obvious option here.
                         */
                        cifs_server_dbg(VFS, "tcp sent no data\n");
                        msleep(500);
                        continue;
                }

                /* send was at least partially successful */
                *sent += rc;
                retries = 0; /* in case we get ENOSPC on the next send */
        }
        return 0;
}

unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        unsigned int i;
        struct kvec *iov;
        int nvec;
        unsigned long buflen = 0;

        if (server->vals->header_preamble_size == 0 &&
            rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
                iov = &rqst->rq_iov[1];
                nvec = rqst->rq_nvec - 1;
        } else {
                iov = rqst->rq_iov;
                nvec = rqst->rq_nvec;
        }

        /* total up iov array first */
        for (i = 0; i < nvec; i++)
                buflen += iov[i].iov_len;

        /*
         * Add in the page array if there is one. The caller needs to make
         * sure rq_offset and rq_tailsz are set correctly. If a buffer of
         * multiple pages ends at page boundary, rq_tailsz needs to be set to
         * PAGE_SIZE.
         */
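        /*
         * Worked example (hypothetical values): three pages with
         * rq_pagesz == PAGE_SIZE, rq_offset == 512 and rq_tailsz == 100
         * contribute 2 * PAGE_SIZE - 512 + 100 bytes below.
         */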
        if (rqst->rq_npages) {
                if (rqst->rq_npages == 1)
                        buflen += rqst->rq_tailsz;
                else {
                        /*
                         * If there is more than one page, calculate the
                         * buffer length based on rq_offset and rq_tailsz
                         */
                        buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
                                        rqst->rq_offset;
                        buflen += rqst->rq_tailsz;
                }
        }

        return buflen;
}

static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
                struct smb_rqst *rqst)
{
        int rc = 0;
        struct kvec *iov;
        int n_vec;
        unsigned int send_length = 0;
        unsigned int i, j;
        sigset_t mask, oldmask;
        size_t total_len = 0, sent, size;
        struct socket *ssocket = server->ssocket;
        struct msghdr smb_msg;
        __be32 rfc1002_marker;

        if (cifs_rdma_enabled(server)) {
                /* return -EAGAIN when connecting or reconnecting */
                rc = -EAGAIN;
                if (server->smbd_conn)
                        rc = smbd_send(server, num_rqst, rqst);
                goto smbd_done;
        }

        if (ssocket == NULL)
                return -EAGAIN;

        if (fatal_signal_pending(current)) {
                cifs_dbg(FYI, "signal pending before send request\n");
                return -ERESTARTSYS;
        }

        /* cork the socket */
        tcp_sock_set_cork(ssocket->sk, true);

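        /*
         * Compute the total frame length and the 4-byte, big-endian length
         * prefix that frames each SMB2+ PDU on the TCP stream (RFC 1002
         * session message style).
         */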
        for (j = 0; j < num_rqst; j++)
                send_length += smb_rqst_len(server, &rqst[j]);
        rfc1002_marker = cpu_to_be32(send_length);

        /*
         * We should not allow signals to interrupt the network send because
         * any partial send will cause session reconnects thus increasing
         * latency of system calls and overload a server with unnecessary
         * requests.
         */

        sigfillset(&mask);
        sigprocmask(SIG_BLOCK, &mask, &oldmask);

        /* Generate a rfc1002 marker for SMB2+ */
        if (server->vals->header_preamble_size == 0) {
                struct kvec hiov = {
                        .iov_base = &rfc1002_marker,
                        .iov_len  = 4
                };
                iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;
                send_length += 4;
        }

        cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

        for (j = 0; j < num_rqst; j++) {
                iov = rqst[j].rq_iov;
                n_vec = rqst[j].rq_nvec;

                size = 0;
                for (i = 0; i < n_vec; i++) {
                        dump_smb(iov[i].iov_base, iov[i].iov_len);
                        size += iov[i].iov_len;
                }

                iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;

                /* now walk the page array and send each page in it */
                for (i = 0; i < rqst[j].rq_npages; i++) {
                        struct bio_vec bvec;

                        bvec.bv_page = rqst[j].rq_pages[i];
                        rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
                                             &bvec.bv_offset);

                        iov_iter_bvec(&smb_msg.msg_iter, WRITE,
                                      &bvec, 1, bvec.bv_len);
                        rc = smb_send_kvec(server, &smb_msg, &sent);
                        if (rc < 0)
                                break;

                        total_len += sent;
                }
        }

unmask:
        sigprocmask(SIG_SETMASK, &oldmask, NULL);

        /*
         * If a signal is pending but we have already sent the whole packet
         * to the server, we need to return success so that a corresponding
         * mid entry is kept in the pending requests queue and the client can
         * still handle the server's response.
         *
         * If only part of the packet has been sent there is no need to hide
         * the interrupt because the session will be reconnected anyway, so
         * there won't be any response from the server to handle.
         */

        if (signal_pending(current) && (total_len != send_length)) {
                cifs_dbg(FYI, "signal is pending after attempt to send\n");
                rc = -ERESTARTSYS;
        }

        /* uncork it */
        tcp_sock_set_cork(ssocket->sk, false);

        if ((total_len > 0) && (total_len != send_length)) {
                cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
                         send_length, total_len);
                /*
                 * If we have only sent part of an SMB then the next SMB could
                 * be taken as the remainder of this one. We need to kill the
                 * socket so the server throws away the partial SMB.
                 */
                spin_lock(&GlobalMid_Lock);
                server->tcpStatus = CifsNeedReconnect;
                spin_unlock(&GlobalMid_Lock);
                trace_smb3_partial_send_reconnect(server->CurrentMid,
                                                  server->conn_id, server->hostname);
        }
smbd_done:
        if (rc < 0 && rc != -EINTR)
                cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
                         rc);
        else if (rc > 0)
                rc = 0;

        return rc;
}

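/*
 * Wrap and send one or more requests. When CIFS_TRANSFORM_REQ is set,
 * an SMB2 transform header is prepended as a new first request and the
 * server's init_transform_rq callback builds the encrypted payload
 * before it is handed to __smb_send_rqst().
 */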
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
              struct smb_rqst *rqst, int flags)
{
        struct kvec iov;
        struct smb2_transform_hdr *tr_hdr;
        struct smb_rqst cur_rqst[MAX_COMPOUND];
        int rc;

        if (!(flags & CIFS_TRANSFORM_REQ))
                return __smb_send_rqst(server, num_rqst, rqst);

        if (num_rqst > MAX_COMPOUND - 1)
                return -ENOMEM;

        if (!server->ops->init_transform_rq) {
                cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
                return -EIO;
        }

        tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS);
        if (!tr_hdr)
                return -ENOMEM;

        memset(&cur_rqst[0], 0, sizeof(cur_rqst));
        memset(&iov, 0, sizeof(iov));
        memset(tr_hdr, 0, sizeof(*tr_hdr));

        iov.iov_base = tr_hdr;
        iov.iov_len = sizeof(*tr_hdr);
        cur_rqst[0].rq_iov = &iov;
        cur_rqst[0].rq_nvec = 1;

        rc = server->ops->init_transform_rq(server, num_rqst + 1,
                                            &cur_rqst[0], rqst);
        if (rc)
                goto out;

        rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
        smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
out:
        kfree(tr_hdr);
        return rc;
}

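/*
 * Send a single SMB frame whose first four bytes are the RFC 1002
 * length field, splitting it into the two iovecs __smb_send_rqst()
 * expects (length marker first, then the SMB payload).
 */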
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
         unsigned int smb_buf_length)
{
        struct kvec iov[2];
        struct smb_rqst rqst = { .rq_iov = iov,
                                 .rq_nvec = 2 };

        iov[0].iov_base = smb_buffer;
        iov[0].iov_len = 4;
        iov[1].iov_base = (char *)smb_buffer + 4;
        iov[1].iov_len = smb_buf_length;

        return __smb_send_rqst(server, 1, &rqst);
}

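/*
 * Wait for the server to grant enough credits (SMB2+ flow control) or
 * request slots before sending. Counters are updated under req_lock;
 * the timeout is in milliseconds, with a negative value meaning wait
 * indefinitely.
 */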
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
                      const int timeout, const int flags,
                      unsigned int *instance)
{
        long rc;
        int *credits;
        int optype;
        long int t;
        int scredits, in_flight;

        if (timeout < 0)
                t = MAX_JIFFY_OFFSET;
        else
                t = msecs_to_jiffies(timeout);

        optype = flags & CIFS_OP_MASK;

        *instance = 0;

        credits = server->ops->get_credits_field(server, optype);
        /* Since an echo is already inflight, no need to wait to send another */
        if (*credits <= 0 && optype == CIFS_ECHO_OP)
                return -EAGAIN;

        spin_lock(&server->req_lock);
        if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
                /* oplock breaks must not be held up */
                server->in_flight++;
                if (server->in_flight > server->max_in_flight)
                        server->max_in_flight = server->in_flight;
                *credits -= 1;
                *instance = server->reconnect_instance;
                scredits = *credits;
                in_flight = server->in_flight;
                spin_unlock(&server->req_lock);

                trace_smb3_add_credits(server->CurrentMid,
                                server->conn_id, server->hostname, scredits, -1, in_flight);
                cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
                                __func__, 1, scredits);

                return 0;
        }

        while (1) {
                if (*credits < num_credits) {
                        scredits = *credits;
                        spin_unlock(&server->req_lock);

                        cifs_num_waiters_inc(server);
                        rc = wait_event_killable_timeout(server->request_q,
                                has_credits(server, credits, num_credits), t);
                        cifs_num_waiters_dec(server);
                        if (!rc) {
                                spin_lock(&server->req_lock);
                                scredits = *credits;
                                in_flight = server->in_flight;
                                spin_unlock(&server->req_lock);

                                trace_smb3_credit_timeout(server->CurrentMid,
                                                server->conn_id, server->hostname, scredits,
                                                num_credits, in_flight);
                                cifs_server_dbg(VFS, "wait timed out after %d ms\n",
                                                timeout);
                                return -EBUSY;
                        }
                        if (rc == -ERESTARTSYS)
                                return -ERESTARTSYS;
                        spin_lock(&server->req_lock);
                } else {
                        if (server->tcpStatus == CifsExiting) {
                                spin_unlock(&server->req_lock);
                                return -ENOENT;
                        }

                        /*
                         * For normal commands, reserve the last MAX_COMPOUND
                         * credits for compound requests.
                         * Otherwise these compounds could be permanently
                         * starved for credits by single-credit requests.
                         *
                         * To prevent spinning of the CPU, block this thread
                         * until there are >MAX_COMPOUND credits available.
                         * But only do this if we already have a lot of
                         * requests in flight, to avoid triggering this check
                         * for servers that are slow to hand out credits on
                         * new sessions.
                         */
                        if (!optype && num_credits == 1 &&
                            server->in_flight > 2 * MAX_COMPOUND &&
                            *credits <= MAX_COMPOUND) {
                                spin_unlock(&server->req_lock);

                                cifs_num_waiters_inc(server);
                                rc = wait_event_killable_timeout(
                                        server->request_q,
                                        has_credits(server, credits,
                                                    MAX_COMPOUND + 1),
                                        t);
                                cifs_num_waiters_dec(server);
                                if (!rc) {
                                        spin_lock(&server->req_lock);
                                        scredits = *credits;
                                        in_flight = server->in_flight;
                                        spin_unlock(&server->req_lock);

                                        trace_smb3_credit_timeout(
                                                        server->CurrentMid,
                                                        server->conn_id, server->hostname,
                                                        scredits, num_credits, in_flight);
                                        cifs_server_dbg(VFS, "wait timed out after %d ms\n",
                                                        timeout);
                                        return -EBUSY;
                                }
                                if (rc == -ERESTARTSYS)
                                        return -ERESTARTSYS;
                                spin_lock(&server->req_lock);
                                continue;
                        }

                        /*
                         * Cannot count locking commands against the total
                         * as they are allowed to block on the server.
                         */

                        /* update # of requests on the wire to server */
                        if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
                                *credits -= num_credits;
                                server->in_flight += num_credits;
                                if (server->in_flight > server->max_in_flight)
                                        server->max_in_flight = server->in_flight;
                                *instance = server->reconnect_instance;
                        }
                        scredits = *credits;
                        in_flight = server->in_flight;
                        spin_unlock(&server->req_lock);

                        trace_smb3_add_credits(server->CurrentMid,
                                        server->conn_id, server->hostname, scredits,
                                        -(num_credits), in_flight);
                        cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
                                        __func__, num_credits, scredits);
                        break;
                }
        }
        return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
                      unsigned int *instance)
{
        return wait_for_free_credits(server, 1, -1, flags,
                                     instance);
}

static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
                          const int flags, unsigned int *instance)
{
        int *credits;
        int scredits, in_flight;

        credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

        spin_lock(&server->req_lock);
        scredits = *credits;
        in_flight = server->in_flight;

        if (*credits < num) {
                /*
                 * If the server is tight on resources or just gives us less
                 * credits for other reasons (e.g. requests are coming out of
                 * order and the server delays granting more credits until it
                 * processes a missing mid) and we exhausted most available
                 * credits there may be situations when we try to send
                 * a compound request but we don't have enough credits. At this
                 * point the client needs to decide if it should wait for
                 * additional credits or fail the request. If at least one
                 * request is in flight there is a high probability that the
                 * server will return enough credits to satisfy this compound
                 * request.
                 *
                 * Return immediately if no requests are in flight since we
                 * would otherwise be stuck waiting for credits.
                 */
                if (server->in_flight == 0) {
                        spin_unlock(&server->req_lock);
                        trace_smb3_insufficient_credits(server->CurrentMid,
                                        server->conn_id, server->hostname, scredits,
                                        num, in_flight);
                        cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
                                        __func__, in_flight, num, scredits);
                        return -EDEADLK;
                }
        }
        spin_unlock(&server->req_lock);

        return wait_for_free_credits(server, num, 60000, flags,
                                     instance);
}

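/*
 * Trivial implementation used when the protocol has no MTU credit
 * accounting: grant the whole requested size and consume no credits.
 */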
int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
                      unsigned int *num, struct cifs_credits *credits)
{
        *num = size;
        credits->value = 0;
        credits->instance = server->reconnect_instance;
        return 0;
}

static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
                        struct mid_q_entry **ppmidQ)
{
        if (ses->server->tcpStatus == CifsExiting) {
                return -ENOENT;
        }

        if (ses->server->tcpStatus == CifsNeedReconnect) {
                cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
                return -EAGAIN;
        }

        if (ses->status == CifsNew) {
                if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
                        (in_buf->Command != SMB_COM_NEGOTIATE))
                        return -EAGAIN;
                /* else ok - we are setting up session */
        }

        if (ses->status == CifsExiting) {
                /* only LOGOFF is valid while the session is going away */
                if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
                        return -EAGAIN;
                /* else ok - we are shutting down session */
        }

        *ppmidQ = AllocMidQEntry(in_buf, ses->server);
        if (*ppmidQ == NULL)
                return -ENOMEM;
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);
        return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
        int error;

        error = wait_event_freezekillable_unsafe(server->response_q,
                                    midQ->mid_state != MID_REQUEST_SUBMITTED);
        if (error < 0)
                return -ERESTARTSYS;

        return 0;
}

struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        /* enable signing if server requires it */
        if (server->sign)
                hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

        mid = AllocMidQEntry(hdr, server);
        if (mid == NULL)
                return ERR_PTR(-ENOMEM);

        rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
        if (rc) {
                DeleteMidQEntry(mid);
                return ERR_PTR(rc);
        }

        return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
                mid_receive_t *receive, mid_callback_t *callback,
                mid_handle_t *handle, void *cbdata, const int flags,
                const struct cifs_credits *exist_credits)
{
        int rc;
        struct mid_q_entry *mid;
        struct cifs_credits credits = { .value = 0, .instance = 0 };
        unsigned int instance;
        int optype;

        optype = flags & CIFS_OP_MASK;

        if ((flags & CIFS_HAS_CREDITS) == 0) {
                rc = wait_for_free_request(server, flags, &instance);
                if (rc)
                        return rc;
                credits.value = 1;
                credits.instance = instance;
        } else
                instance = exist_credits->instance;

        mutex_lock(&server->srv_mutex);

        /*
         * We can't use credits obtained from the previous session to send this
         * request. Check if there were reconnects after we obtained credits and
         * return -EAGAIN in such cases to let callers handle it.
         */
        if (instance != server->reconnect_instance) {
                mutex_unlock(&server->srv_mutex);
                add_credits_and_wake_if(server, &credits, optype);
                return -EAGAIN;
        }

        mid = server->ops->setup_async_request(server, rqst);
        if (IS_ERR(mid)) {
                mutex_unlock(&server->srv_mutex);
                add_credits_and_wake_if(server, &credits, optype);
                return PTR_ERR(mid);
        }

        mid->receive = receive;
        mid->callback = callback;
        mid->callback_data = cbdata;
        mid->handle = handle;
        mid->mid_state = MID_REQUEST_SUBMITTED;

        /* put it on the pending_mid_q */
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&mid->qhead, &server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);

        /*
         * Need to store the time in mid before calling I/O. For call_async,
         * the I/O response may come back and free the mid entry on another
         * thread.
         */
        cifs_save_when_sent(mid);
        cifs_in_send_inc(server);
        rc = smb_send_rqst(server, 1, rqst, flags);
        cifs_in_send_dec(server);

        if (rc < 0) {
                revert_current_mid(server, mid->credits);
                server->sequence_number -= 2;
                cifs_delete_mid(mid);
        }

        mutex_unlock(&server->srv_mutex);

        if (rc == 0)
                return 0;

        add_credits_and_wake_if(server, &credits, optype);
        return rc;
}

/*
 * Send an SMB Request. No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
                 char *in_buf, int flags)
{
        int rc;
        struct kvec iov[1];
        struct kvec rsp_iov;
        int resp_buf_type;

        iov[0].iov_base = in_buf;
        iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
        flags |= CIFS_NO_RSP_BUF;
        rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
        cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

        return rc;
}

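/*
 * Translate the final mid state into an errno once the caller has
 * stopped waiting; on anything but a clean MID_RESPONSE_RECEIVED the
 * mid is also released here.
 */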
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
        int rc = 0;

        cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
                 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

        spin_lock(&GlobalMid_Lock);
        switch (mid->mid_state) {
        case MID_RESPONSE_RECEIVED:
                spin_unlock(&GlobalMid_Lock);
                return rc;
        case MID_RETRY_NEEDED:
                rc = -EAGAIN;
                break;
        case MID_RESPONSE_MALFORMED:
                rc = -EIO;
                break;
        case MID_SHUTDOWN:
                rc = -EHOSTDOWN;
                break;
        default:
                if (!(mid->mid_flags & MID_DELETED)) {
                        list_del_init(&mid->qhead);
                        mid->mid_flags |= MID_DELETED;
                }
                cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
                         __func__, mid->mid, mid->mid_state);
                rc = -EIO;
        }
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
        return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
            struct mid_q_entry *mid)
{
        return server->ops->send_cancel ?
                                server->ops->send_cancel(server, rqst, mid) : 0;
}

int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
                   bool log_error)
{
        unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

        dump_smb(mid->resp_buf, min_t(u32, 92, len));

        /* verify the signature if the server signed the response */
        if (server->sign) {
                struct kvec iov[2];
                int rc = 0;
                struct smb_rqst rqst = { .rq_iov = iov,
                                         .rq_nvec = 2 };

                iov[0].iov_base = mid->resp_buf;
                iov[0].iov_len = 4;
                iov[1].iov_base = (char *)mid->resp_buf + 4;
                iov[1].iov_len = len - 4;
                /* FIXME: add code to kill session */
                rc = cifs_verify_signature(&rqst, server,
                                           mid->sequence_number);
                if (rc)
                        cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
                                 rc);
        }

        /* BB special case reconnect tid and uid here? */
        return map_and_check_smb_error(mid, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
                   struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        rc = allocate_mid(ses, hdr, &mid);
        if (rc)
                return ERR_PTR(rc);
        rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
        if (rc) {
                cifs_delete_mid(mid);
                return ERR_PTR(rc);
        }
        return mid;
}

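/*
 * Credits granted by the server are returned to the client's pool as
 * soon as each response arrives. Every mid in a compound chain gets
 * this callback so per-mid credits are accounted even though the
 * caller only sleeps on the last mid in the chain.
 */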
static void
cifs_compound_callback(struct mid_q_entry *mid)
{
        struct TCP_Server_Info *server = mid->server;
        struct cifs_credits credits;

        credits.value = server->ops->get_credits(mid);
        credits.instance = server->reconnect_instance;

        add_credits(server, &credits, mid->optype);
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        DeleteMidQEntry(mid);
}

/*
 * Return a channel (master if none) of @ses that can be used to send
 * regular requests.
 *
 * If we are currently binding a new channel (negprot/sess.setup),
 * return the new incomplete channel.
 */
struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
{
        uint index = 0;

        if (!ses)
                return NULL;

        if (!ses->binding) {
                /* round robin */
                if (ses->chan_count > 1) {
                        index = (uint)atomic_inc_return(&ses->chan_seq);
                        index %= ses->chan_count;
                }
                return ses->chans[index].server;
        } else {
                return cifs_ses_server(ses);
        }
}

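/*
 * Send a chain of one or more compounded requests and wait for the
 * responses. Credits are taken for every request up front; callbacks
 * on the individual mids return them as responses arrive.
 */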
1060 int
1061 compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
1062                    struct TCP_Server_Info *server,
1063                    const int flags, const int num_rqst, struct smb_rqst *rqst,
1064                    int *resp_buf_type, struct kvec *resp_iov)
1065 {
1066         int i, j, optype, rc = 0;
1067         struct mid_q_entry *midQ[MAX_COMPOUND];
1068         bool cancelled_mid[MAX_COMPOUND] = {false};
1069         struct cifs_credits credits[MAX_COMPOUND] = {
1070                 { .value = 0, .instance = 0 }
1071         };
1072         unsigned int instance;
1073         char *buf;
1074
1075         optype = flags & CIFS_OP_MASK;
1076
1077         for (i = 0; i < num_rqst; i++)
1078                 resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */
1079
1080         if (!ses || !ses->server || !server) {
1081                 cifs_dbg(VFS, "Null session\n");
1082                 return -EIO;
1083         }
1084
1085         if (server->tcpStatus == CifsExiting)
1086                 return -ENOENT;
1087
1088         /*
1089          * Wait for all the requests to become available.
1090          * This approach still leaves the possibility to be stuck waiting for
1091          * credits if the server doesn't grant credits to the outstanding
1092          * requests and if the client is completely idle, not generating any
1093          * other requests.
1094          * This can be handled by the eventual session reconnect.
1095          */
1096         rc = wait_for_compound_request(server, num_rqst, flags,
1097                                        &instance);
1098         if (rc)
1099                 return rc;
1100
1101         for (i = 0; i < num_rqst; i++) {
1102                 credits[i].value = 1;
1103                 credits[i].instance = instance;
1104         }
1105
1106         /*
1107          * Make sure that we sign in the same order that we send on this socket
1108          * and avoid races inside tcp sendmsg code that could cause corruption
1109          * of smb data.
1110          */
1111
1112         mutex_lock(&server->srv_mutex);
1113
1114         /*
1115          * All the parts of the compound chain belong obtained credits from the
1116          * same session. We can not use credits obtained from the previous
1117          * session to send this request. Check if there were reconnects after
1118          * we obtained credits and return -EAGAIN in such cases to let callers
1119          * handle it.
1120          */
1121         if (instance != server->reconnect_instance) {
1122                 mutex_unlock(&server->srv_mutex);
1123                 for (j = 0; j < num_rqst; j++)
1124                         add_credits(server, &credits[j], optype);
1125                 return -EAGAIN;
1126         }
1127
1128         for (i = 0; i < num_rqst; i++) {
1129                 midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
1130                 if (IS_ERR(midQ[i])) {
1131                         revert_current_mid(server, i);
1132                         for (j = 0; j < i; j++)
1133                                 cifs_delete_mid(midQ[j]);
1134                         mutex_unlock(&server->srv_mutex);
1135
1136                         /* Update # of requests on wire to server */
1137                         for (j = 0; j < num_rqst; j++)
1138                                 add_credits(server, &credits[j], optype);
1139                         return PTR_ERR(midQ[i]);
1140                 }
1141
1142                 midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
1143                 midQ[i]->optype = optype;
1144                 /*
1145                  * Invoke callback for every part of the compound chain
1146                  * to calculate credits properly. Wake up this thread only when
1147                  * the last element is received.
1148                  */
1149                 if (i < num_rqst - 1)
1150                         midQ[i]->callback = cifs_compound_callback;
1151                 else
1152                         midQ[i]->callback = cifs_compound_last_callback;
1153         }
1154         cifs_in_send_inc(server);
1155         rc = smb_send_rqst(server, num_rqst, rqst, flags);
1156         cifs_in_send_dec(server);
1157
1158         for (i = 0; i < num_rqst; i++)
1159                 cifs_save_when_sent(midQ[i]);
1160
1161         if (rc < 0) {
1162                 revert_current_mid(server, num_rqst);
1163                 server->sequence_number -= 2;
1164         }
1165
1166         mutex_unlock(&server->srv_mutex);
1167
1168         /*
1169          * If sending failed for some reason or it is an oplock break that we
1170          * will not receive a response to - return credits back
1171          */
1172         if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
1173                 for (i = 0; i < num_rqst; i++)
1174                         add_credits(server, &credits[i], optype);
1175                 goto out;
1176         }
1177
1178         /*
1179          * At this point the request is passed to the network stack - we assume
1180          * that any credits taken from the server structure on the client have
1181          * been spent and we can't return them back. Once we receive responses
1182          * we will collect credits granted by the server in the mid callbacks
1183          * and add those credits to the server structure.
1184          */
1185
1186         /*
1187          * Compounding is never used during session establish.
1188          */
1189         if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
1190                 mutex_lock(&server->srv_mutex);
1191                 smb311_update_preauth_hash(ses, rqst[0].rq_iov,
1192                                            rqst[0].rq_nvec);
1193                 mutex_unlock(&server->srv_mutex);
1194         }
1195
1196         for (i = 0; i < num_rqst; i++) {
1197                 rc = wait_for_response(server, midQ[i]);
1198                 if (rc != 0)
1199                         break;
1200         }
1201         if (rc != 0) {
1202                 for (; i < num_rqst; i++) {
1203                         cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
1204                                  midQ[i]->mid, le16_to_cpu(midQ[i]->command));
1205                         send_cancel(server, &rqst[i], midQ[i]);
1206                         spin_lock(&GlobalMid_Lock);
1207                         midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
1208                         if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
1209                                 midQ[i]->callback = cifs_cancelled_callback;
1210                                 cancelled_mid[i] = true;
1211                                 credits[i].value = 0;
1212                         }
1213                         spin_unlock(&GlobalMid_Lock);
1214                 }
1215         }
1216
1217         for (i = 0; i < num_rqst; i++) {
1218                 if (rc < 0)
1219                         goto out;
1220
1221                 rc = cifs_sync_mid_result(midQ[i], server);
1222                 if (rc != 0) {
1223                         /* mark this mid as cancelled to not free it below */
1224                         cancelled_mid[i] = true;
1225                         goto out;
1226                 }
1227
1228                 if (!midQ[i]->resp_buf ||
1229                     midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
1230                         rc = -EIO;
1231                         cifs_dbg(FYI, "Bad MID state?\n");
1232                         goto out;
1233                 }
1234
1235                 buf = (char *)midQ[i]->resp_buf;
1236                 resp_iov[i].iov_base = buf;
1237                 resp_iov[i].iov_len = midQ[i]->resp_buf_size +
1238                         server->vals->header_preamble_size;
1239
1240                 if (midQ[i]->large_buf)
1241                         resp_buf_type[i] = CIFS_LARGE_BUFFER;
1242                 else
1243                         resp_buf_type[i] = CIFS_SMALL_BUFFER;
1244
1245                 rc = server->ops->check_receive(midQ[i], server,
1246                                                      flags & CIFS_LOG_ERROR);
1247
1248                 /* mark it so buf will not be freed by cifs_delete_mid */
1249                 if ((flags & CIFS_NO_RSP_BUF) == 0)
1250                         midQ[i]->resp_buf = NULL;
1251
1252         }
1253
1254         /*
1255          * Compounding is never used during session establish.
1256          */
1257         if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
1258                 struct kvec iov = {
1259                         .iov_base = resp_iov[0].iov_base,
1260                         .iov_len = resp_iov[0].iov_len
1261                 };
1262                 mutex_lock(&server->srv_mutex);
1263                 smb311_update_preauth_hash(ses, &iov, 1);
1264                 mutex_unlock(&server->srv_mutex);
1265         }
1266
1267 out:
1268         /*
1269          * This will dequeue all mids. After this it is important that the
1270          * demultiplex_thread will not process any of these mids any futher.
1271          * This is prevented above by using a noop callback that will not
1272          * wake this thread except for the very last PDU.
1273          */
1274         for (i = 0; i < num_rqst; i++) {
1275                 if (!cancelled_mid[i])
1276                         cifs_delete_mid(midQ[i]);
1277         }
1278
1279         return rc;
1280 }
1281
1282 int
1283 cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
1284                struct TCP_Server_Info *server,
1285                struct smb_rqst *rqst, int *resp_buf_type, const int flags,
1286                struct kvec *resp_iov)
1287 {
1288         return compound_send_recv(xid, ses, server, flags, 1,
1289                                   rqst, resp_buf_type, resp_iov);
1290 }
1291
1292 int
1293 SendReceive2(const unsigned int xid, struct cifs_ses *ses,
1294              struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
1295              const int flags, struct kvec *resp_iov)
1296 {
1297         struct smb_rqst rqst;
1298         struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
1299         int rc;
1300
1301         if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
1302                 new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
1303                                         GFP_KERNEL);
1304                 if (!new_iov) {
1305                         /* otherwise cifs_send_recv below sets resp_buf_type */
1306                         *resp_buf_type = CIFS_NO_BUFFER;
1307                         return -ENOMEM;
1308                 }
1309         } else
1310                 new_iov = s_iov;
1311
1312         /* 1st iov is a RFC1001 length followed by the rest of the packet */
1313         memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
1314
1315         new_iov[0].iov_base = new_iov[1].iov_base;
1316         new_iov[0].iov_len = 4;
1317         new_iov[1].iov_base += 4;
1318         new_iov[1].iov_len -= 4;
1319
1320         memset(&rqst, 0, sizeof(struct smb_rqst));
1321         rqst.rq_iov = new_iov;
1322         rqst.rq_nvec = n_vec + 1;
1323
1324         rc = cifs_send_recv(xid, ses, ses->server,
1325                             &rqst, resp_buf_type, flags, resp_iov);
1326         if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
1327                 kfree(new_iov);
1328         return rc;
1329 }
1330
1331 int
1332 SendReceive(const unsigned int xid, struct cifs_ses *ses,
1333             struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1334             int *pbytes_returned, const int flags)
1335 {
1336         int rc = 0;
1337         struct mid_q_entry *midQ;
1338         unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1339         struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1340         struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1341         struct cifs_credits credits = { .value = 1, .instance = 0 };
1342         struct TCP_Server_Info *server;
1343
1344         if (ses == NULL) {
1345                 cifs_dbg(VFS, "Null smb session\n");
1346                 return -EIO;
1347         }
1348         server = ses->server;
1349         if (server == NULL) {
1350                 cifs_dbg(VFS, "Null tcp session\n");
1351                 return -EIO;
1352         }
1353
1354         if (server->tcpStatus == CifsExiting)
1355                 return -ENOENT;
1356
1357         /* Ensure that we do not send more than 50 overlapping requests
1358            to the same server. We may make this configurable later or
1359            use ses->maxReq */
1360
1361         if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1362                 cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
1363                                 len);
1364                 return -EIO;
1365         }
1366
1367         rc = wait_for_free_request(server, flags, &credits.instance);
1368         if (rc)
1369                 return rc;
1370
1371         /* make sure that we sign in the same order that we send on this socket
1372            and avoid races inside tcp sendmsg code that could cause corruption
1373            of smb data */
1374
1375         mutex_lock(&server->srv_mutex);
1376
1377         rc = allocate_mid(ses, in_buf, &midQ);
1378         if (rc) {
1379                 mutex_unlock(&server->srv_mutex);
1380                 /* Update # of requests on wire to server */
1381                 add_credits(server, &credits, 0);
1382                 return rc;
1383         }
1384
1385         rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1386         if (rc) {
1387                 mutex_unlock(&server->srv_mutex);
1388                 goto out;
1389         }
1390
1391         midQ->mid_state = MID_REQUEST_SUBMITTED;
1392
1393         cifs_in_send_inc(server);
1394         rc = smb_send(server, in_buf, len);
1395         cifs_in_send_dec(server);
1396         cifs_save_when_sent(midQ);
1397
	if (rc < 0)
		server->sequence_number -= 2;

	mutex_unlock(&server->srv_mutex);

	if (rc < 0)
		goto out;

	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		send_cancel(server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}
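
/*
 * Illustrative sketch only: legacy SMB1 callers hand SendReceive() a fully
 * built request frame and a buffer for the response. pSMB and pSMBr are
 * placeholder names for caller-allocated request/response buffers, not
 * identifiers defined in this file.
 *
 *	int bytes_returned = 0;
 *	int rc;
 *
 *	rc = SendReceive(xid, ses, (struct smb_hdr *)pSMB,
 *			 (struct smb_hdr *)pSMBr, &bytes_returned, 0);
 */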

/*
 * We send a LOCKINGX_CANCEL_LOCK to cause the Windows
 * blocking lock to return.
 */
static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf,
			struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/*
	 * We just modify the current in_buf to change
	 * the type of lock from LOCKING_ANDX_SHARED_LOCK
	 * or LOCKING_ANDX_EXCLUSIVE_LOCK to
	 * LOCKING_ANDX_CANCEL_LOCK.
	 */
	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
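	/* The cancel goes out as a new request, so give it a fresh MID. */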
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			&bytes_returned, 0);
}

int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
			      len);
		return -EIO;
	}

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */
	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this
	 * socket and avoid races inside tcp sendmsg code that could cause
	 * corruption of SMB data.
	 */
	mutex_lock(&server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		server->sequence_number -= 2;

	mutex_unlock(&server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal? */
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((server->tcpStatus == CifsGood) ||
		 (server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/*
			 * POSIX lock. We send an NT_CANCEL SMB to cause the
			 * blocking lock to return.
			 */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/*
			 * Windows lock. We send a LOCKINGX_CANCEL_LOCK
			 * to cause the blocking lock to return.
			 */
			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/*
			 * If we get -ENOLCK back the lock may have
			 * already been removed. Don't exit in this case.
			 */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(server, midQ);
		if (rc) {
			send_cancel(server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart the system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* received frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_tcon_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
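	/*
	 * If we had to cancel the blocking lock, a lock-conflict error from
	 * the cancelled request should not be returned to the caller;
	 * restart the system call so the lock attempt is retried.
	 */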
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}
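
/*
 * Illustrative sketch only: a blocking byte-range lock caller (compare
 * CIFSSMBLock() in fs/cifs/cifssmb.c) can pass the same buffer as request
 * and response, since the request has been consumed by the time the reply
 * arrives. pSMB and bytes_returned are placeholder names.
 *
 *	rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *)pSMB,
 *				     (struct smb_hdr *)pSMB, &bytes_returned);
 */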