// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

void
cifs_wake_up_task(struct mid_q_entry *mid)
{
        wake_up_process(mid->callback_data);
}

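/*
 * AllocMidQEntry - allocate and initialize a mid queue entry
 * @smb_buffer: request header the entry is created for
 * @server: connection the request will be sent on
 *
 * The entry is returned with one reference held; drop it with
 * cifs_mid_q_entry_release() (or DeleteMidQEntry()) when done. The default
 * callback simply wakes up the task that allocated the mid.
 */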
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
        struct mid_q_entry *temp;

        if (server == NULL) {
                cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
                return NULL;
        }

        temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
        memset(temp, 0, sizeof(struct mid_q_entry));
        kref_init(&temp->refcount);
        temp->mid = get_mid(smb_buffer);
        temp->pid = current->pid;
        temp->command = cpu_to_le16(smb_buffer->Command);
        cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
        /*      do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
        /* when mid allocated can be before when sent */
        temp->when_alloc = jiffies;
        temp->server = server;

        /*
         * The default is for the mid to be synchronous, so the
         * default callback just wakes up the current task.
         */
        get_task_struct(current);
        temp->creator = current;
        temp->callback = cifs_wake_up_task;
        temp->callback_data = current;

        atomic_inc(&midCount);
        temp->mid_state = MID_REQUEST_ALLOCATED;
        return temp;
}

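/*
 * Final kref release for a mid. In this file it runs with GlobalMid_Lock
 * held (taken by cifs_mid_q_entry_release() below); it updates the
 * per-command statistics and returns the entry to the mempool.
 */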
static void _cifs_mid_q_entry_release(struct kref *refcount)
{
        struct mid_q_entry *midEntry =
                        container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
        __le16 command = midEntry->server->vals->lock_cmd;
        __u16 smb_cmd = le16_to_cpu(midEntry->command);
        unsigned long now;
        unsigned long roundtrip_time;
#endif
        struct TCP_Server_Info *server = midEntry->server;

        if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
            midEntry->mid_state == MID_RESPONSE_RECEIVED &&
            server->ops->handle_cancelled_mid)
                server->ops->handle_cancelled_mid(midEntry, server);

        midEntry->mid_state = MID_FREE;
        atomic_dec(&midCount);
        if (midEntry->large_buf)
                cifs_buf_release(midEntry->resp_buf);
        else
                cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
        now = jiffies;
        if (now < midEntry->when_alloc)
                cifs_server_dbg(VFS, "Invalid mid allocation time\n");
        roundtrip_time = now - midEntry->when_alloc;

        if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
                if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
                        server->slowest_cmd[smb_cmd] = roundtrip_time;
                        server->fastest_cmd[smb_cmd] = roundtrip_time;
                } else {
                        if (server->slowest_cmd[smb_cmd] < roundtrip_time)
                                server->slowest_cmd[smb_cmd] = roundtrip_time;
                        else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
                                server->fastest_cmd[smb_cmd] = roundtrip_time;
                }
                cifs_stats_inc(&server->num_cmds[smb_cmd]);
                server->time_per_cmd[smb_cmd] += roundtrip_time;
        }
        /*
         * commands taking longer than one second (default) can be indications
         * that something is wrong, unless it is quite a slow link or a very
         * busy server. Note that this calc is unlikely or impossible to wrap
         * as long as slow_rsp_threshold is not set way above the recommended
         * max value (32767, i.e. 9 hours) and is generally harmless even if
         * wrong since it only affects debug counters - so leaving the calc
         * as a simple comparison rather than doing multiple conversions and
         * overflow checks
         */
        if ((slow_rsp_threshold != 0) &&
            time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
            (midEntry->command != command)) {
                /*
                 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
                 * NB: le16_to_cpu returns unsigned so can not be negative below
                 */
                if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
                        cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

                trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
                               midEntry->when_sent, midEntry->when_received);
                if (cifsFYI & CIFS_TIMER) {
                        pr_debug("slow rsp: cmd %d mid %llu\n",
                                 smb_cmd, midEntry->mid);
                        cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
                                  now - midEntry->when_alloc,
                                  now - midEntry->when_sent,
                                  now - midEntry->when_received);
                }
        }
#endif
        put_task_struct(midEntry->creator);

        mempool_free(midEntry, cifs_mid_poolp);
}

void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
        spin_lock(&GlobalMid_Lock);
        kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
        spin_unlock(&GlobalMid_Lock);
}

void DeleteMidQEntry(struct mid_q_entry *midEntry)
{
        cifs_mid_q_entry_release(midEntry);
}

void
cifs_delete_mid(struct mid_q_entry *mid)
{
        spin_lock(&GlobalMid_Lock);
        if (!(mid->mid_flags & MID_DELETED)) {
                list_del_init(&mid->qhead);
                mid->mid_flags |= MID_DELETED;
        }
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:     Server to send the data to
 * @smb_msg:    Message to send
 * @sent:       amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
              size_t *sent)
{
        int rc = 0;
        int retries = 0;
        struct socket *ssocket = server->ssocket;

        *sent = 0;

        smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
        smb_msg->msg_namelen = sizeof(struct sockaddr);
        smb_msg->msg_control = NULL;
        smb_msg->msg_controllen = 0;
        if (server->noblocksnd)
                smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
        else
                smb_msg->msg_flags = MSG_NOSIGNAL;

        while (msg_data_left(smb_msg)) {
                /*
                 * If blocking send, we try 3 times, since each can block
                 * for 5 seconds. For nonblocking sends we have to try more
                 * times, waiting increasing amounts of time to allow the
                 * socket to clear.  The overall time we wait in either
                 * case to send on the socket is about 15 seconds.
                 * Similarly we wait for 15 seconds for a response from
                 * the server in SendReceive[2] for the server to send
                 * a response back for most types of requests (except
                 * SMB Write past end of file which can be slow, and
                 * blocking lock operations). NFS waits slightly longer
                 * than CIFS, but this can make it take longer for
                 * nonresponsive servers to be detected and 15 seconds
                 * is more than enough time for modern networks to
                 * send a packet.  In most cases if we fail to send
                 * after the retries we will kill the socket and
                 * reconnect which may clear the network problem.
                 */
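                /*
                 * For the nonblocking case the backoff below works out to
                 * msleep(2) + msleep(4) + ... + msleep(8192) across the 13
                 * allowed retries, i.e. roughly 16 seconds in total before
                 * we give up and return -EAGAIN.
                 */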
                rc = sock_sendmsg(ssocket, smb_msg);
                if (rc == -EAGAIN) {
                        retries++;
                        if (retries >= 14 ||
                            (!server->noblocksnd && (retries > 2))) {
                                cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
                                         ssocket);
                                return -EAGAIN;
                        }
                        msleep(1 << retries);
                        continue;
                }

                if (rc < 0)
                        return rc;

                if (rc == 0) {
                        /*
                         * should never happen, letting socket clear before
                         * retrying is our only obvious option here
                         */
                        cifs_server_dbg(VFS, "tcp sent no data\n");
                        msleep(500);
                        continue;
                }

                /* send was at least partially successful */
                *sent += rc;
                retries = 0; /* in case we get ENOSPC on the next send */
        }
        return 0;
}

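/*
 * smb_rqst_len - total number of bytes a request occupies on the wire
 * @server: server the request is destined for
 * @rqst: request to measure
 *
 * Sums the kvec array plus any trailing page array. For SMB2+ connections
 * (no RFC1001 preamble) a leading 4-byte length iov is skipped here because
 * the length marker is generated separately at send time.
 */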
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        unsigned int i;
        struct kvec *iov;
        int nvec;
        unsigned long buflen = 0;

        if (server->vals->header_preamble_size == 0 &&
            rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
                iov = &rqst->rq_iov[1];
                nvec = rqst->rq_nvec - 1;
        } else {
                iov = rqst->rq_iov;
                nvec = rqst->rq_nvec;
        }

        /* total up iov array first */
        for (i = 0; i < nvec; i++)
                buflen += iov[i].iov_len;

        /*
         * Add in the page array if there is one. The caller needs to make
         * sure rq_offset and rq_tailsz are set correctly. If a buffer of
         * multiple pages ends at page boundary, rq_tailsz needs to be set to
         * PAGE_SIZE.
         */
        if (rqst->rq_npages) {
                if (rqst->rq_npages == 1)
                        buflen += rqst->rq_tailsz;
                else {
                        /*
                         * If there is more than one page, calculate the
                         * buffer length based on rq_offset and rq_tailsz
                         */
                        buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
                                        rqst->rq_offset;
                        buflen += rqst->rq_tailsz;
                }
        }

        return buflen;
}

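/*
 * Send one or more requests (a compound chain) on the socket. All signals
 * are blocked for the duration of the send so that a partial send cannot
 * be caused by an interrupt; if only part of the data made it out anyway,
 * the socket is marked for reconnect since the server would otherwise
 * misparse the next SMB. Like smb_send_kvec(), this is expected to be
 * called with srv_mutex held.
 */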
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
                struct smb_rqst *rqst)
{
        int rc = 0;
        struct kvec *iov;
        int n_vec;
        unsigned int send_length = 0;
        unsigned int i, j;
        sigset_t mask, oldmask;
        size_t total_len = 0, sent, size;
        struct socket *ssocket = server->ssocket;
        struct msghdr smb_msg;
        __be32 rfc1002_marker;

        if (cifs_rdma_enabled(server)) {
                /* return -EAGAIN when connecting or reconnecting */
                rc = -EAGAIN;
                if (server->smbd_conn)
                        rc = smbd_send(server, num_rqst, rqst);
                goto smbd_done;
        }

        if (ssocket == NULL)
                return -EAGAIN;

        if (fatal_signal_pending(current)) {
                cifs_dbg(FYI, "signal pending before send request\n");
                return -ERESTARTSYS;
        }

        /* cork the socket */
        tcp_sock_set_cork(ssocket->sk, true);

        for (j = 0; j < num_rqst; j++)
                send_length += smb_rqst_len(server, &rqst[j]);
        rfc1002_marker = cpu_to_be32(send_length);

        /*
         * We should not allow signals to interrupt the network send because
         * any partial send will cause session reconnects, thus increasing
         * latency of system calls and overloading the server with
         * unnecessary requests.
         */

        sigfillset(&mask);
        sigprocmask(SIG_BLOCK, &mask, &oldmask);

        /* Generate a rfc1002 marker for SMB2+ */
        if (server->vals->header_preamble_size == 0) {
                struct kvec hiov = {
                        .iov_base = &rfc1002_marker,
                        .iov_len  = 4
                };
                iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;
                send_length += 4;
        }

        cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

        for (j = 0; j < num_rqst; j++) {
                iov = rqst[j].rq_iov;
                n_vec = rqst[j].rq_nvec;

                size = 0;
                for (i = 0; i < n_vec; i++) {
                        dump_smb(iov[i].iov_base, iov[i].iov_len);
                        size += iov[i].iov_len;
                }

                iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;

                /* now walk the page array and send each page in it */
                for (i = 0; i < rqst[j].rq_npages; i++) {
                        struct bio_vec bvec;

                        bvec.bv_page = rqst[j].rq_pages[i];
                        rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
                                             &bvec.bv_offset);

                        iov_iter_bvec(&smb_msg.msg_iter, WRITE,
                                      &bvec, 1, bvec.bv_len);
                        rc = smb_send_kvec(server, &smb_msg, &sent);
                        if (rc < 0)
                                break;

                        total_len += sent;
                }
        }

unmask:
        sigprocmask(SIG_SETMASK, &oldmask, NULL);

        /*
         * If a signal is pending but we have already sent the whole packet
         * to the server we need to return success status to allow the
         * corresponding mid entry to be kept in the pending requests queue,
         * thus allowing the client to handle responses from the server.
         *
         * If only part of the packet has been sent there is no need to hide
         * the interrupt because the session will be reconnected anyway, so
         * there won't be any response from the server to handle.
         */

        if (signal_pending(current) && (total_len != send_length)) {
                cifs_dbg(FYI, "signal is pending after attempt to send\n");
                rc = -ERESTARTSYS;
        }

        /* uncork it */
        tcp_sock_set_cork(ssocket->sk, false);

        if ((total_len > 0) && (total_len != send_length)) {
                cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
                         send_length, total_len);
                /*
                 * If we have only sent part of an SMB then the next SMB could
                 * be taken as the remainder of this one. We need to kill the
                 * socket so the server throws away the partial SMB
                 */
                cifs_signal_cifsd_for_reconnect(server, false);
                trace_smb3_partial_send_reconnect(server->CurrentMid,
                                                  server->conn_id, server->hostname);
        }
smbd_done:
        if (rc < 0 && rc != -EINTR)
                cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
                         rc);
        else if (rc > 0)
                rc = 0;

        return rc;
}

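/*
 * Send a request, optionally encrypting it first. For CIFS_TRANSFORM_REQ
 * the chain is copied into cur_rqst[1..num_rqst] with a transform header
 * prepended as cur_rqst[0], which is why one slot of MAX_COMPOUND is
 * reserved for the header.
 */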
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
              struct smb_rqst *rqst, int flags)
{
        struct kvec iov;
        struct smb2_transform_hdr *tr_hdr;
        struct smb_rqst cur_rqst[MAX_COMPOUND];
        int rc;

        if (!(flags & CIFS_TRANSFORM_REQ))
                return __smb_send_rqst(server, num_rqst, rqst);

        if (num_rqst > MAX_COMPOUND - 1)
                return -ENOMEM;

        if (!server->ops->init_transform_rq) {
                cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
                return -EIO;
        }

        tr_hdr = kzalloc(sizeof(*tr_hdr), GFP_NOFS);
        if (!tr_hdr)
                return -ENOMEM;

        memset(&cur_rqst[0], 0, sizeof(cur_rqst));
        memset(&iov, 0, sizeof(iov));

        iov.iov_base = tr_hdr;
        iov.iov_len = sizeof(*tr_hdr);
        cur_rqst[0].rq_iov = &iov;
        cur_rqst[0].rq_nvec = 1;

        rc = server->ops->init_transform_rq(server, num_rqst + 1,
                                            &cur_rqst[0], rqst);
        if (rc)
                goto out;

        rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
        smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
out:
        kfree(tr_hdr);
        return rc;
}

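/*
 * Send a single SMB buffer. The first 4 bytes of @smb_buffer hold the
 * RFC1002 length field, so the buffer is split into two iovecs: iov[0]
 * covers the 4 header bytes and iov[1] the remaining @smb_buf_length
 * bytes of the packet.
 */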
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
         unsigned int smb_buf_length)
{
        struct kvec iov[2];
        struct smb_rqst rqst = { .rq_iov = iov,
                                 .rq_nvec = 2 };

        iov[0].iov_base = smb_buffer;
        iov[0].iov_len = 4;
        iov[1].iov_base = (char *)smb_buffer + 4;
        iov[1].iov_len = smb_buf_length;

        return __smb_send_rqst(server, 1, &rqst);
}

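/*
 * Wait until at least @num_credits credits are available for this operation
 * type, or until @timeout milliseconds elapse (a negative timeout waits
 * forever). On success the credits are consumed, in_flight is bumped and
 * *@instance records the current reconnect instance so callers can detect
 * reconnects that happened while they waited. CIFS_NON_BLOCKING requests
 * (e.g. oplock breaks) are never held up.
 */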
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
                      const int timeout, const int flags,
                      unsigned int *instance)
{
        long rc;
        int *credits;
        int optype;
        long int t;
        int scredits, in_flight;

        if (timeout < 0)
                t = MAX_JIFFY_OFFSET;
        else
                t = msecs_to_jiffies(timeout);

        optype = flags & CIFS_OP_MASK;

        *instance = 0;

        credits = server->ops->get_credits_field(server, optype);
        /* Since an echo is already inflight, no need to wait to send another */
        if (*credits <= 0 && optype == CIFS_ECHO_OP)
                return -EAGAIN;

        spin_lock(&server->req_lock);
        if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
                /* oplock breaks must not be held up */
                server->in_flight++;
                if (server->in_flight > server->max_in_flight)
                        server->max_in_flight = server->in_flight;
                *credits -= 1;
                *instance = server->reconnect_instance;
                scredits = *credits;
                in_flight = server->in_flight;
                spin_unlock(&server->req_lock);

                trace_smb3_nblk_credits(server->CurrentMid,
                                server->conn_id, server->hostname, scredits, -1, in_flight);
                cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
                                __func__, 1, scredits);

                return 0;
        }

        while (1) {
                if (*credits < num_credits) {
                        scredits = *credits;
                        spin_unlock(&server->req_lock);

                        cifs_num_waiters_inc(server);
                        rc = wait_event_killable_timeout(server->request_q,
                                has_credits(server, credits, num_credits), t);
                        cifs_num_waiters_dec(server);
                        if (!rc) {
                                spin_lock(&server->req_lock);
                                scredits = *credits;
                                in_flight = server->in_flight;
                                spin_unlock(&server->req_lock);

                                trace_smb3_credit_timeout(server->CurrentMid,
                                                server->conn_id, server->hostname, scredits,
                                                num_credits, in_flight);
                                cifs_server_dbg(VFS, "wait timed out after %d ms\n",
                                                timeout);
                                return -EBUSY;
                        }
                        if (rc == -ERESTARTSYS)
                                return -ERESTARTSYS;
                        spin_lock(&server->req_lock);
                } else {
                        spin_unlock(&server->req_lock);

                        spin_lock(&cifs_tcp_ses_lock);
                        if (server->tcpStatus == CifsExiting) {
                                spin_unlock(&cifs_tcp_ses_lock);
                                return -ENOENT;
                        }
                        spin_unlock(&cifs_tcp_ses_lock);

                        /*
                         * For normal commands, reserve the last MAX_COMPOUND
                         * credits for compound requests.
                         * Otherwise these compounds could be permanently
                         * starved for credits by single-credit requests.
                         *
                         * To prevent spinning the CPU, block this thread until
                         * there are >MAX_COMPOUND credits available.
                         * But only do this if we already have a lot of
                         * requests in flight, to avoid triggering this check
                         * for servers that are slow to hand out credits on
                         * new sessions.
                         */
                        spin_lock(&server->req_lock);
                        if (!optype && num_credits == 1 &&
                            server->in_flight > 2 * MAX_COMPOUND &&
                            *credits <= MAX_COMPOUND) {
                                spin_unlock(&server->req_lock);

                                cifs_num_waiters_inc(server);
                                rc = wait_event_killable_timeout(
                                        server->request_q,
                                        has_credits(server, credits,
                                                    MAX_COMPOUND + 1),
                                        t);
                                cifs_num_waiters_dec(server);
                                if (!rc) {
                                        spin_lock(&server->req_lock);
                                        scredits = *credits;
                                        in_flight = server->in_flight;
                                        spin_unlock(&server->req_lock);

                                        trace_smb3_credit_timeout(
                                                        server->CurrentMid,
                                                        server->conn_id, server->hostname,
                                                        scredits, num_credits, in_flight);
                                        cifs_server_dbg(VFS, "wait timed out after %d ms\n",
                                                        timeout);
                                        return -EBUSY;
                                }
                                if (rc == -ERESTARTSYS)
                                        return -ERESTARTSYS;
                                spin_lock(&server->req_lock);
                                continue;
                        }

                        /*
                         * Can not count locking commands against the total
                         * as they are allowed to block on the server.
                         */

                        /* update # of requests on the wire to server */
                        if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
                                *credits -= num_credits;
                                server->in_flight += num_credits;
                                if (server->in_flight > server->max_in_flight)
                                        server->max_in_flight = server->in_flight;
                                *instance = server->reconnect_instance;
                        }
                        scredits = *credits;
                        in_flight = server->in_flight;
                        spin_unlock(&server->req_lock);

                        trace_smb3_waitff_credits(server->CurrentMid,
                                        server->conn_id, server->hostname, scredits,
                                        -(num_credits), in_flight);
                        cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
                                        __func__, num_credits, scredits);
                        break;
                }
        }
        return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
                      unsigned int *instance)
{
        return wait_for_free_credits(server, 1, -1, flags,
                                     instance);
}

static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
                          const int flags, unsigned int *instance)
{
        int *credits;
        int scredits, in_flight;

        credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

        spin_lock(&server->req_lock);
        scredits = *credits;
        in_flight = server->in_flight;

        if (*credits < num) {
                /*
                 * If the server is tight on resources or just gives us less
                 * credits for other reasons (e.g. requests are coming out of
                 * order and the server delays granting more credits until it
                 * processes a missing mid) and we exhausted most available
                 * credits there may be situations when we try to send
                 * a compound request but we don't have enough credits. At this
                 * point the client needs to decide if it should wait for
                 * additional credits or fail the request. If at least one
                 * request is in flight there is a high probability that the
                 * server will return enough credits to satisfy this compound
                 * request.
                 *
                 * Return immediately if no requests are in flight since we
                 * would otherwise be stuck waiting for credits.
                 */
                if (server->in_flight == 0) {
                        spin_unlock(&server->req_lock);
                        trace_smb3_insufficient_credits(server->CurrentMid,
                                        server->conn_id, server->hostname, scredits,
                                        num, in_flight);
                        cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
                                        __func__, in_flight, num, scredits);
                        return -EDEADLK;
                }
        }
        spin_unlock(&server->req_lock);

        return wait_for_free_credits(server, num, 60000, flags,
                                     instance);
}

int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
                      unsigned int *num, struct cifs_credits *credits)
{
        *num = size;
        credits->value = 0;
        credits->instance = server->reconnect_instance;
        return 0;
}

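/*
 * Allocate a mid for an SMB1 request and queue it on pending_mid_q. Only
 * session setup and negotiate may be sent while the session is still
 * CifsNew, and only LOGOFF once it is exiting.
 */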
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
                        struct mid_q_entry **ppmidQ)
{
        spin_lock(&cifs_tcp_ses_lock);
        if (ses->status == CifsNew) {
                if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
                        (in_buf->Command != SMB_COM_NEGOTIATE)) {
                        spin_unlock(&cifs_tcp_ses_lock);
                        return -EAGAIN;
                }
                /* else ok - we are setting up session */
        }

        if (ses->status == CifsExiting) {
                /* check if SMB session is bad because we are shutting it down */
                if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
                        spin_unlock(&cifs_tcp_ses_lock);
                        return -EAGAIN;
                }
                /* else ok - we are shutting down session */
        }
        spin_unlock(&cifs_tcp_ses_lock);

        *ppmidQ = AllocMidQEntry(in_buf, ses->server);
        if (*ppmidQ == NULL)
                return -ENOMEM;
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);
        return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
        int error;

        error = wait_event_freezekillable_unsafe(server->response_q,
                                    midQ->mid_state != MID_REQUEST_SUBMITTED);
        if (error < 0)
                return -ERESTARTSYS;

        return 0;
}

struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        /* enable signing if server requires it */
        if (server->sign)
                hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

        mid = AllocMidQEntry(hdr, server);
        if (mid == NULL)
                return ERR_PTR(-ENOMEM);

        rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
        if (rc) {
                DeleteMidQEntry(mid);
                return ERR_PTR(rc);
        }

        return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
                mid_receive_t *receive, mid_callback_t *callback,
                mid_handle_t *handle, void *cbdata, const int flags,
                const struct cifs_credits *exist_credits)
{
        int rc;
        struct mid_q_entry *mid;
        struct cifs_credits credits = { .value = 0, .instance = 0 };
        unsigned int instance;
        int optype;

        optype = flags & CIFS_OP_MASK;

        if ((flags & CIFS_HAS_CREDITS) == 0) {
                rc = wait_for_free_request(server, flags, &instance);
                if (rc)
                        return rc;
                credits.value = 1;
                credits.instance = instance;
        } else
                instance = exist_credits->instance;

        mutex_lock(&server->srv_mutex);

        /*
         * We can't use credits obtained from the previous session to send this
         * request. Check if there were reconnects after we obtained credits and
         * return -EAGAIN in such cases to let callers handle it.
         */
        if (instance != server->reconnect_instance) {
                mutex_unlock(&server->srv_mutex);
                add_credits_and_wake_if(server, &credits, optype);
                return -EAGAIN;
        }

        mid = server->ops->setup_async_request(server, rqst);
        if (IS_ERR(mid)) {
                mutex_unlock(&server->srv_mutex);
                add_credits_and_wake_if(server, &credits, optype);
                return PTR_ERR(mid);
        }

        mid->receive = receive;
        mid->callback = callback;
        mid->callback_data = cbdata;
        mid->handle = handle;
        mid->mid_state = MID_REQUEST_SUBMITTED;

        /* put it on the pending_mid_q */
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&mid->qhead, &server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);

        /*
         * Need to store the time in mid before calling I/O. For call_async,
         * I/O response may come back and free the mid entry on another thread.
         */
        cifs_save_when_sent(mid);
        cifs_in_send_inc(server);
        rc = smb_send_rqst(server, 1, rqst, flags);
        cifs_in_send_dec(server);

        if (rc < 0) {
                revert_current_mid(server, mid->credits);
                server->sequence_number -= 2;
                cifs_delete_mid(mid);
        }

        mutex_unlock(&server->srv_mutex);

        if (rc == 0)
                return 0;

        add_credits_and_wake_if(server, &credits, optype);
        return rc;
}

/*
 * Send an SMB Request.  No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer, how long to wait, and whether
 * to log the NT STATUS code (error) before mapping it to a POSIX error.
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
                 char *in_buf, int flags)
{
        int rc;
        struct kvec iov[1];
        struct kvec rsp_iov;
        int resp_buf_type;

        iov[0].iov_base = in_buf;
        iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
        flags |= CIFS_NO_RSP_BUF;
        rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
        cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

        return rc;
}

static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
        int rc = 0;

        cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
                 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

        spin_lock(&GlobalMid_Lock);
        switch (mid->mid_state) {
        case MID_RESPONSE_RECEIVED:
                spin_unlock(&GlobalMid_Lock);
                return rc;
        case MID_RETRY_NEEDED:
                rc = -EAGAIN;
                break;
        case MID_RESPONSE_MALFORMED:
                rc = -EIO;
                break;
        case MID_SHUTDOWN:
                rc = -EHOSTDOWN;
                break;
        default:
                if (!(mid->mid_flags & MID_DELETED)) {
                        list_del_init(&mid->qhead);
                        mid->mid_flags |= MID_DELETED;
                }
                cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
                         __func__, mid->mid, mid->mid_state);
                rc = -EIO;
        }
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
        return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
            struct mid_q_entry *mid)
{
        return server->ops->send_cancel ?
                                server->ops->send_cancel(server, rqst, mid) : 0;
}

int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
                   bool log_error)
{
        unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

        dump_smb(mid->resp_buf, min_t(u32, 92, len));

        /* verify the packet signature if signing is enabled */
        if (server->sign) {
                struct kvec iov[2];
                int rc = 0;
                struct smb_rqst rqst = { .rq_iov = iov,
                                         .rq_nvec = 2 };

                iov[0].iov_base = mid->resp_buf;
                iov[0].iov_len = 4;
                iov[1].iov_base = (char *)mid->resp_buf + 4;
                iov[1].iov_len = len - 4;
                /* FIXME: add code to kill session */
                rc = cifs_verify_signature(&rqst, server,
                                           mid->sequence_number);
                if (rc)
                        cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
                                 rc);
        }

        /* BB special case reconnect tid and uid here? */
        return map_and_check_smb_error(mid, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
                   struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        rc = allocate_mid(ses, hdr, &mid);
        if (rc)
                return ERR_PTR(rc);
        rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
        if (rc) {
                cifs_delete_mid(mid);
                return ERR_PTR(rc);
        }
        return mid;
}

static void
cifs_compound_callback(struct mid_q_entry *mid)
{
        struct TCP_Server_Info *server = mid->server;
        struct cifs_credits credits;

        credits.value = server->ops->get_credits(mid);
        credits.instance = server->reconnect_instance;

        add_credits(server, &credits, mid->optype);
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        DeleteMidQEntry(mid);
}

/*
 * Return a channel (master if none) of @ses that can be used to send
 * regular requests.
 *
 * If we are currently binding a new channel (negprot/sess.setup),
 * return the new incomplete channel.
 */
struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
{
        uint index = 0;

        if (!ses)
                return NULL;

        /* round robin */
        index = (uint)atomic_inc_return(&ses->chan_seq);

        spin_lock(&ses->chan_lock);
        index %= ses->chan_count;
        spin_unlock(&ses->chan_lock);

        return ses->chans[index].server;
}

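/*
 * Send @num_rqst requests as one compound chain and wait for the responses.
 * Credits are taken up front for every part of the chain; each mid gets a
 * callback that banks the credits granted by its response, and only the
 * last mid wakes this thread. Mids cancelled while waiting are handed to
 * cifs_cancelled_callback so the demultiplex thread frees them.
 */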
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
                   struct TCP_Server_Info *server,
                   const int flags, const int num_rqst, struct smb_rqst *rqst,
                   int *resp_buf_type, struct kvec *resp_iov)
{
        int i, j, optype, rc = 0;
        struct mid_q_entry *midQ[MAX_COMPOUND];
        bool cancelled_mid[MAX_COMPOUND] = {false};
        struct cifs_credits credits[MAX_COMPOUND] = {
                { .value = 0, .instance = 0 }
        };
        unsigned int instance;
        char *buf;

        optype = flags & CIFS_OP_MASK;

        for (i = 0; i < num_rqst; i++)
                resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

        if (!ses || !ses->server || !server) {
                cifs_dbg(VFS, "Null session\n");
                return -EIO;
        }

        spin_lock(&cifs_tcp_ses_lock);
        if (server->tcpStatus == CifsExiting) {
                spin_unlock(&cifs_tcp_ses_lock);
                return -ENOENT;
        }
        spin_unlock(&cifs_tcp_ses_lock);

        /*
         * Wait for all the requests to become available.
         * This approach still leaves the possibility to be stuck waiting for
         * credits if the server doesn't grant credits to the outstanding
         * requests and if the client is completely idle, not generating any
         * other requests.
         * This can be handled by the eventual session reconnect.
         */
        rc = wait_for_compound_request(server, num_rqst, flags,
                                       &instance);
        if (rc)
                return rc;

        for (i = 0; i < num_rqst; i++) {
                credits[i].value = 1;
                credits[i].instance = instance;
        }

        /*
         * Make sure that we sign in the same order that we send on this socket
         * and avoid races inside tcp sendmsg code that could cause corruption
         * of smb data.
         */

        mutex_lock(&server->srv_mutex);

        /*
         * All the parts of the compound chain must use credits obtained from
         * the same session. We can not use credits obtained from the previous
         * session to send this request. Check if there were reconnects after
         * we obtained credits and return -EAGAIN in such cases to let callers
         * handle it.
         */
        if (instance != server->reconnect_instance) {
                mutex_unlock(&server->srv_mutex);
                for (j = 0; j < num_rqst; j++)
                        add_credits(server, &credits[j], optype);
                return -EAGAIN;
        }

        for (i = 0; i < num_rqst; i++) {
                midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
                if (IS_ERR(midQ[i])) {
                        revert_current_mid(server, i);
                        for (j = 0; j < i; j++)
                                cifs_delete_mid(midQ[j]);
                        mutex_unlock(&server->srv_mutex);

                        /* Update # of requests on wire to server */
                        for (j = 0; j < num_rqst; j++)
                                add_credits(server, &credits[j], optype);
                        return PTR_ERR(midQ[i]);
                }

                midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
                midQ[i]->optype = optype;
                /*
                 * Invoke callback for every part of the compound chain
                 * to calculate credits properly. Wake up this thread only when
                 * the last element is received.
                 */
                if (i < num_rqst - 1)
                        midQ[i]->callback = cifs_compound_callback;
                else
                        midQ[i]->callback = cifs_compound_last_callback;
        }
        cifs_in_send_inc(server);
        rc = smb_send_rqst(server, num_rqst, rqst, flags);
        cifs_in_send_dec(server);

        for (i = 0; i < num_rqst; i++)
                cifs_save_when_sent(midQ[i]);

        if (rc < 0) {
                revert_current_mid(server, num_rqst);
                server->sequence_number -= 2;
        }

        mutex_unlock(&server->srv_mutex);

        /*
         * If sending failed for some reason or it is an oplock break that we
         * will not receive a response to - return credits back
         */
        if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
                for (i = 0; i < num_rqst; i++)
                        add_credits(server, &credits[i], optype);
                goto out;
        }

        /*
         * At this point the request is passed to the network stack - we assume
         * that any credits taken from the server structure on the client have
         * been spent and we can't return them back. Once we receive responses
         * we will collect credits granted by the server in the mid callbacks
         * and add those credits to the server structure.
         */

        /*
         * Compounding is never used during session establish.
         */
        spin_lock(&cifs_tcp_ses_lock);
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
                spin_unlock(&cifs_tcp_ses_lock);

                mutex_lock(&server->srv_mutex);
                smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
                mutex_unlock(&server->srv_mutex);

                spin_lock(&cifs_tcp_ses_lock);
        }
        spin_unlock(&cifs_tcp_ses_lock);

        for (i = 0; i < num_rqst; i++) {
                rc = wait_for_response(server, midQ[i]);
                if (rc != 0)
                        break;
        }
        if (rc != 0) {
                for (; i < num_rqst; i++) {
                        cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
                                 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
                        send_cancel(server, &rqst[i], midQ[i]);
                        spin_lock(&GlobalMid_Lock);
                        midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
                        if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
                                midQ[i]->callback = cifs_cancelled_callback;
                                cancelled_mid[i] = true;
                                credits[i].value = 0;
                        }
                        spin_unlock(&GlobalMid_Lock);
                }
        }

        for (i = 0; i < num_rqst; i++) {
                if (rc < 0)
                        goto out;

                rc = cifs_sync_mid_result(midQ[i], server);
                if (rc != 0) {
                        /* mark this mid as cancelled to not free it below */
                        cancelled_mid[i] = true;
                        goto out;
                }

                if (!midQ[i]->resp_buf ||
                    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
                        rc = -EIO;
                        cifs_dbg(FYI, "Bad MID state?\n");
                        goto out;
                }

                buf = (char *)midQ[i]->resp_buf;
                resp_iov[i].iov_base = buf;
                resp_iov[i].iov_len = midQ[i]->resp_buf_size +
                        server->vals->header_preamble_size;

                if (midQ[i]->large_buf)
                        resp_buf_type[i] = CIFS_LARGE_BUFFER;
                else
                        resp_buf_type[i] = CIFS_SMALL_BUFFER;

                rc = server->ops->check_receive(midQ[i], server,
                                                     flags & CIFS_LOG_ERROR);

                /* mark it so buf will not be freed by cifs_delete_mid */
                if ((flags & CIFS_NO_RSP_BUF) == 0)
                        midQ[i]->resp_buf = NULL;

        }

        /*
         * Compounding is never used during session establish.
         */
        spin_lock(&cifs_tcp_ses_lock);
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
                struct kvec iov = {
                        .iov_base = resp_iov[0].iov_base,
                        .iov_len = resp_iov[0].iov_len
                };
                spin_unlock(&cifs_tcp_ses_lock);
                mutex_lock(&server->srv_mutex);
                smb311_update_preauth_hash(ses, server, &iov, 1);
                mutex_unlock(&server->srv_mutex);
                spin_lock(&cifs_tcp_ses_lock);
        }
        spin_unlock(&cifs_tcp_ses_lock);

out:
        /*
         * This will dequeue all mids. After this it is important that the
         * demultiplex_thread will not process any of these mids any further.
         * This is prevented above by using a noop callback that will not
         * wake this thread except for the very last PDU.
         */
        for (i = 0; i < num_rqst; i++) {
                if (!cancelled_mid[i])
                        cifs_delete_mid(midQ[i]);
        }

        return rc;
}

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
               struct TCP_Server_Info *server,
               struct smb_rqst *rqst, int *resp_buf_type, const int flags,
               struct kvec *resp_iov)
{
        return compound_send_recv(xid, ses, server, flags, 1,
                                  rqst, resp_buf_type, resp_iov);
}

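/*
 * Legacy entry point that takes a flat iov array in which iov[0] starts
 * with the 4-byte RFC1001 length field. The array is rebuilt below into
 * the rq_iov layout the transport expects: new_iov[0] carries just the
 * length field and new_iov[1] the rest of the first buffer.
 */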
1301 int
1302 SendReceive2(const unsigned int xid, struct cifs_ses *ses,
1303              struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
1304              const int flags, struct kvec *resp_iov)
1305 {
1306         struct smb_rqst rqst;
1307         struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
1308         int rc;
1309
1310         if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
1311                 new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
1312                                         GFP_KERNEL);
1313                 if (!new_iov) {
1314                         /* otherwise cifs_send_recv below sets resp_buf_type */
1315                         *resp_buf_type = CIFS_NO_BUFFER;
1316                         return -ENOMEM;
1317                 }
1318         } else
1319                 new_iov = s_iov;
1320
1321         /* 1st iov is a RFC1001 length followed by the rest of the packet */
1322         memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
1323
1324         new_iov[0].iov_base = new_iov[1].iov_base;
1325         new_iov[0].iov_len = 4;
1326         new_iov[1].iov_base += 4;
1327         new_iov[1].iov_len -= 4;
1328
1329         memset(&rqst, 0, sizeof(struct smb_rqst));
1330         rqst.rq_iov = new_iov;
1331         rqst.rq_nvec = n_vec + 1;
1332
1333         rc = cifs_send_recv(xid, ses, ses->server,
1334                             &rqst, resp_buf_type, flags, resp_iov);
1335         if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
1336                 kfree(new_iov);
1337         return rc;
1338 }
1339
1340 int
1341 SendReceive(const unsigned int xid, struct cifs_ses *ses,
1342             struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1343             int *pbytes_returned, const int flags)
1344 {
1345         int rc = 0;
1346         struct mid_q_entry *midQ;
1347         unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1348         struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1349         struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1350         struct cifs_credits credits = { .value = 1, .instance = 0 };
1351         struct TCP_Server_Info *server;
1352
1353         if (ses == NULL) {
1354                 cifs_dbg(VFS, "Null smb session\n");
1355                 return -EIO;
1356         }
1357         server = ses->server;
1358         if (server == NULL) {
1359                 cifs_dbg(VFS, "Null tcp session\n");
1360                 return -EIO;
1361         }
1362
1363         spin_lock(&cifs_tcp_ses_lock);
1364         if (server->tcpStatus == CifsExiting) {
1365                 spin_unlock(&cifs_tcp_ses_lock);
1366                 return -ENOENT;
1367         }
1368         spin_unlock(&cifs_tcp_ses_lock);
1369
1370         /* Ensure that we do not send more than 50 overlapping requests
1371            to the same server. We may make this configurable later or
1372            use ses->maxReq */
1373
1374         if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1375                 cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
1376                                 len);
1377                 return -EIO;
1378         }
1379
1380         rc = wait_for_free_request(server, flags, &credits.instance);
1381         if (rc)
1382                 return rc;
1383
	/*
	 * Make sure that we sign in the same order that we send on this
	 * socket and avoid races inside tcp sendmsg code that could cause
	 * corruption of smb data.
	 */
	mutex_lock(&server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

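	/*
	 * If the send failed before the frame hit the wire, signing has
	 * already consumed two sequence numbers (one for the request and
	 * one for the expected response), so roll both back.
	 */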
	if (rc < 0)
		server->sequence_number -= 2;

	mutex_unlock(&server->srv_mutex);

	if (rc < 0)
		goto out;

	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		send_cancel(server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/*
			 * No longer considered to be "in-flight": when the
			 * response eventually arrives, the demultiplex thread
			 * will just free the mid via this callback instead of
			 * waking us.
			 */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state %d\n", midQ->mid_state);
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}

/*
 * We send a LOCKINGX_CANCEL_LOCK to cause the Windows
 * blocking lock to return.
 */
static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf,
			struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/*
	 * We just modify the current in_buf to change the type of lock
	 * from LOCKING_ANDX_SHARED_LOCK or LOCKING_ANDX_EXCLUSIVE_LOCK
	 * to LOCKING_ANDX_CANCEL_LOCK.
	 */
	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			&bytes_returned, 0);
}

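/*
 * SendReceiveBlockingLock - like SendReceive(), but for SMB1 blocking
 * byte-range locks, which may legitimately wait on the server for an
 * unbounded time. The wait is therefore interruptible: if a signal
 * arrives first, we cancel the pending lock (NT_CANCEL for POSIX locks
 * carried in SMB_COM_TRANSACTION2, LOCKINGX_CANCEL_LOCK for Windows
 * locks), wait for the response, and arrange for the system call to be
 * restarted.
 */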
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&cifs_tcp_ses_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&cifs_tcp_ses_lock);
		return -ENOENT;
	}
	spin_unlock(&cifs_tcp_ses_lock);

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
			      len);
		return -EIO;
	}

	/*
	 * Ensure that we do not send too many overlapping requests to the
	 * same server: wait_for_free_request() below blocks, as a
	 * CIFS_BLOCKING_OP, until another request may be sent.
	 */
	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this
	 * socket and avoid races inside tcp sendmsg code that could cause
	 * corruption of smb data.
	 */
	mutex_lock(&server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

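	/*
	 * As in SendReceive() above: a failed send means signing consumed
	 * two sequence numbers for nothing, so roll them back.
	 */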
	if (rc < 0)
		server->sequence_number -= 2;

	mutex_unlock(&server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/*
	 * Wait for a reply, allowing signals to interrupt. The wait also
	 * ends once the mid leaves MID_REQUEST_SUBMITTED (a response
	 * arrived or the request failed) or the tcp session goes down.
	 */
	rc = wait_event_interruptible(server->response_q,
		(midQ->mid_state != MID_REQUEST_SUBMITTED) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	spin_lock(&cifs_tcp_ses_lock);
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((server->tcpStatus == CifsGood) ||
		 (server->tcpStatus == CifsNew))) {
		spin_unlock(&cifs_tcp_ses_lock);

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/*
			 * POSIX lock. We send a NT_CANCEL SMB to cause the
			 * blocking lock to return.
			 */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/*
			 * Windows lock. We send a LOCKINGX_CANCEL_LOCK
			 * to cause the blocking lock to return.
			 */
			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/*
			 * If we get -ENOLCK back the lock may have
			 * already been removed. Don't exit in this case.
			 */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(server, midQ);
		if (rc) {
			send_cancel(server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/*
				 * No longer considered to be "in-flight":
				 * the demultiplex thread will free the mid
				 * via this callback when the response
				 * finally arrives.
				 */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
		spin_lock(&cifs_tcp_ses_lock);
	}
	spin_unlock(&cifs_tcp_ses_lock);

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* received frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_tcon_dbg(VFS, "Bad MID state %d\n", midQ->mid_state);
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
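	/*
	 * If we cancelled the blocking lock and the reply still came back
	 * -EACCES, the failure is presumably due to our cancel rather than
	 * a genuine denial; restart the system call so the lock is retried.
	 */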
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}