Merge tag 'ext4-for-linus-5.8-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel...
[linux-2.6-microblaze.git] / fs / cifs / transport.c
1 /*
2  *   fs/cifs/transport.c
3  *
4  *   Copyright (C) International Business Machines  Corp., 2002,2008
5  *   Author(s): Steve French (sfrench@us.ibm.com)
6  *   Jeremy Allison (jra@samba.org) 2006.
7  *
8  *   This library is free software; you can redistribute it and/or modify
9  *   it under the terms of the GNU Lesser General Public License as published
10  *   by the Free Software Foundation; either version 2.1 of the License, or
11  *   (at your option) any later version.
12  *
13  *   This library is distributed in the hope that it will be useful,
14  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
16  *   the GNU Lesser General Public License for more details.
17  *
18  *   You should have received a copy of the GNU Lesser General Public License
19  *   along with this library; if not, write to the Free Software
20  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21  */
22
23 #include <linux/fs.h>
24 #include <linux/list.h>
25 #include <linux/gfp.h>
26 #include <linux/wait.h>
27 #include <linux/net.h>
28 #include <linux/delay.h>
29 #include <linux/freezer.h>
30 #include <linux/tcp.h>
31 #include <linux/bvec.h>
32 #include <linux/highmem.h>
33 #include <linux/uaccess.h>
34 #include <asm/processor.h>
35 #include <linux/mempool.h>
36 #include <linux/sched/signal.h>
37 #include "cifspdu.h"
38 #include "cifsglob.h"
39 #include "cifsproto.h"
40 #include "cifs_debug.h"
41 #include "smb2proto.h"
42 #include "smbdirect.h"
43
44 /* Max number of iovectors we can use off the stack when sending requests. */
45 #define CIFS_MAX_IOV_SIZE 8
46
/*
 * Default mid completion callback: wake the task that issued the request.
 * callback_data is set to the issuing task in AllocMidQEntry().
 */
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}
52
/*
 * Allocate and initialize a mid (multiplex id) queue entry for the request
 * in @smb_buffer destined for @server.
 *
 * The entry defaults to synchronous use: the callback simply wakes the
 * current task (cifs_wake_up_task). A reference on the current task is
 * taken and released in _cifs_mid_q_entry_release().
 *
 * Returns the new entry, or NULL if @server is NULL.
 */
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	/*
	 * NOTE(review): result of mempool_alloc() is used without a NULL
	 * check — presumably relying on GFP_NOFS mempool allocations
	 * sleeping rather than failing; confirm against mempool semantics.
	 */
	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/*	do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	get_task_struct(current);
	temp->creator = current;
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}
88
/*
 * kref release function for a mid entry: runs when the last reference is
 * dropped (called under GlobalMid_Lock via cifs_mid_q_entry_release()).
 *
 * Lets the server ops handle a response that arrived after the waiter was
 * cancelled, releases the response buffer, updates per-command timing
 * statistics (CONFIG_CIFS_STATS2), drops the creator task reference and
 * returns the entry to the mempool.
 */
static void _cifs_mid_q_entry_release(struct kref *refcount)
{
	struct mid_q_entry *midEntry =
			container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
	/* lock_cmd is excluded from slow-response reporting below */
	__le16 command = midEntry->server->vals->lock_cmd;
	__u16 smb_cmd = le16_to_cpu(midEntry->command);
	unsigned long now;
	unsigned long roundtrip_time;
#endif
	struct TCP_Server_Info *server = midEntry->server;

	/*
	 * If a response arrived for a request whose waiter was cancelled,
	 * give the server ops a chance to clean up (e.g. close a handle
	 * the caller will never see).
	 */
	if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
	    midEntry->mid_state == MID_RESPONSE_RECEIVED &&
	    server->ops->handle_cancelled_mid)
		server->ops->handle_cancelled_mid(midEntry->resp_buf, server);

	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	/* large_buf selects which buffer pool resp_buf came from */
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	if (now < midEntry->when_alloc)
		cifs_server_dbg(VFS, "Invalid mid allocation time\n");
	roundtrip_time = now - midEntry->when_alloc;

	/* track fastest/slowest/total round-trip time per SMB2 command */
	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
			/* first sample for this command seeds both extremes */
			server->slowest_cmd[smb_cmd] = roundtrip_time;
			server->fastest_cmd[smb_cmd] = roundtrip_time;
		} else {
			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
				server->slowest_cmd[smb_cmd] = roundtrip_time;
			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
				server->fastest_cmd[smb_cmd] = roundtrip_time;
		}
		cifs_stats_inc(&server->num_cmds[smb_cmd]);
		server->time_per_cmd[smb_cmd] += roundtrip_time;
	}
	/*
	 * commands taking longer than one second (default) can be indications
	 * that something is wrong, unless it is quite a slow link or a very
	 * busy server. Note that this calc is unlikely or impossible to wrap
	 * as long as slow_rsp_threshold is not set way above recommended max
	 * value (32767 ie 9 hours) and is generally harmless even if wrong
	 * since only affects debug counters - so leaving the calc as simple
	 * comparison rather than doing multiple conversions and overflow
	 * checks
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/*
		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
		 * NB: le16_to_cpu returns unsigned so can not be negative below
		 */
		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
			       midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug("slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
				  now - midEntry->when_alloc,
				  now - midEntry->when_sent,
				  now - midEntry->when_received);
		}
	}
#endif
	/* drop the task reference taken in AllocMidQEntry() */
	put_task_struct(midEntry->creator);

	mempool_free(midEntry, cifs_mid_poolp);
}
167
/*
 * Drop a reference on @midEntry; the final put frees it via
 * _cifs_mid_q_entry_release(). GlobalMid_Lock is held across the put so
 * release runs without racing mid-queue users.
 */
void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
	spin_lock(&GlobalMid_Lock);
	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
	spin_unlock(&GlobalMid_Lock);
}
174
/* Legacy-named wrapper: drop a reference on @midEntry. */
void DeleteMidQEntry(struct mid_q_entry *midEntry)
{
	cifs_mid_q_entry_release(midEntry);
}
179
/*
 * Unlink @mid from the pending queue (if not already unlinked by the
 * demultiplex thread — MID_DELETED guards against double list_del) and
 * drop the caller's reference.
 */
void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	if (!(mid->mid_flags & MID_DELETED)) {
		list_del_init(&mid->qhead);
		mid->mid_flags |= MID_DELETED;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}
192
193 /*
194  * smb_send_kvec - send an array of kvecs to the server
195  * @server:     Server to send the data to
196  * @smb_msg:    Message to send
197  * @sent:       amount of data sent on socket is stored here
198  *
199  * Our basic "send data to server" function. Should be called with srv_mutex
200  * held. The caller is responsible for handling the results.
201  */
202 static int
203 smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
204               size_t *sent)
205 {
206         int rc = 0;
207         int retries = 0;
208         struct socket *ssocket = server->ssocket;
209
210         *sent = 0;
211
212         smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
213         smb_msg->msg_namelen = sizeof(struct sockaddr);
214         smb_msg->msg_control = NULL;
215         smb_msg->msg_controllen = 0;
216         if (server->noblocksnd)
217                 smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
218         else
219                 smb_msg->msg_flags = MSG_NOSIGNAL;
220
221         while (msg_data_left(smb_msg)) {
222                 /*
223                  * If blocking send, we try 3 times, since each can block
224                  * for 5 seconds. For nonblocking  we have to try more
225                  * but wait increasing amounts of time allowing time for
226                  * socket to clear.  The overall time we wait in either
227                  * case to send on the socket is about 15 seconds.
228                  * Similarly we wait for 15 seconds for a response from
229                  * the server in SendReceive[2] for the server to send
230                  * a response back for most types of requests (except
231                  * SMB Write past end of file which can be slow, and
232                  * blocking lock operations). NFS waits slightly longer
233                  * than CIFS, but this can make it take longer for
234                  * nonresponsive servers to be detected and 15 seconds
235                  * is more than enough time for modern networks to
236                  * send a packet.  In most cases if we fail to send
237                  * after the retries we will kill the socket and
238                  * reconnect which may clear the network problem.
239                  */
240                 rc = sock_sendmsg(ssocket, smb_msg);
241                 if (rc == -EAGAIN) {
242                         retries++;
243                         if (retries >= 14 ||
244                             (!server->noblocksnd && (retries > 2))) {
245                                 cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
246                                          ssocket);
247                                 return -EAGAIN;
248                         }
249                         msleep(1 << retries);
250                         continue;
251                 }
252
253                 if (rc < 0)
254                         return rc;
255
256                 if (rc == 0) {
257                         /* should never happen, letting socket clear before
258                            retrying is our only obvious option here */
259                         cifs_server_dbg(VFS, "tcp sent no data\n");
260                         msleep(500);
261                         continue;
262                 }
263
264                 /* send was at least partially successful */
265                 *sent += rc;
266                 retries = 0; /* in case we get ENOSPC on the next send */
267         }
268         return 0;
269 }
270
271 unsigned long
272 smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
273 {
274         unsigned int i;
275         struct kvec *iov;
276         int nvec;
277         unsigned long buflen = 0;
278
279         if (server->vals->header_preamble_size == 0 &&
280             rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
281                 iov = &rqst->rq_iov[1];
282                 nvec = rqst->rq_nvec - 1;
283         } else {
284                 iov = rqst->rq_iov;
285                 nvec = rqst->rq_nvec;
286         }
287
288         /* total up iov array first */
289         for (i = 0; i < nvec; i++)
290                 buflen += iov[i].iov_len;
291
292         /*
293          * Add in the page array if there is one. The caller needs to make
294          * sure rq_offset and rq_tailsz are set correctly. If a buffer of
295          * multiple pages ends at page boundary, rq_tailsz needs to be set to
296          * PAGE_SIZE.
297          */
298         if (rqst->rq_npages) {
299                 if (rqst->rq_npages == 1)
300                         buflen += rqst->rq_tailsz;
301                 else {
302                         /*
303                          * If there is more than one page, calculate the
304                          * buffer length based on rq_offset and rq_tailsz
305                          */
306                         buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
307                                         rqst->rq_offset;
308                         buflen += rqst->rq_tailsz;
309                 }
310         }
311
312         return buflen;
313 }
314
/*
 * __smb_send_rqst - transmit @num_rqst requests as one wire message
 *
 * Sends the RFC1002 length marker (SMB2+ only), then each request's iovecs
 * and page array over the server socket (or via smbdirect when RDMA is
 * enabled). All signals are blocked for the duration of the send so a
 * partial send cannot be caused by an interrupt; a partial send forces a
 * session reconnect. Caller must hold srv_mutex (see smb_send_kvec()).
 *
 * Returns 0 on full send, -EINTR if interrupted before/mid send without
 * completing, -EAGAIN when no transport is available, or other negative
 * errno from the socket layer.
 */
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	sigset_t mask, oldmask;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server)) {
		/* return -EAGAIN when connecting or reconnecting */
		rc = -EAGAIN;
		if (server->smbd_conn)
			rc = smbd_send(server, num_rqst, rqst);
		goto smbd_done;
	}

	if (ssocket == NULL)
		return -EAGAIN;

	if (signal_pending(current)) {
		cifs_dbg(FYI, "signal is pending before sending any data\n");
		return -EINTR;
	}

	/* cork the socket: coalesce marker + iovecs + pages into full frames */
	tcp_sock_set_cork(ssocket->sk, true);

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/*
	 * We should not allow signals to interrupt the network send because
	 * any partial send will cause session reconnects thus increasing
	 * latency of system calls and overload a server with unnecessary
	 * requests.
	 */

	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len  = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;
		/* marker counts toward the expected total below */
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

unmask:
	sigprocmask(SIG_SETMASK, &oldmask, NULL);

	/*
	 * If signal is pending but we have already sent the whole packet to
	 * the server we need to return success status to allow a corresponding
	 * mid entry to be kept in the pending requests queue thus allowing
	 * to handle responses from the server by the client.
	 *
	 * If only part of the packet has been sent there is no need to hide
	 * interrupt because the session will be reconnected anyway, so there
	 * won't be any response from the server to handle.
	 */

	if (signal_pending(current) && (total_len != send_length)) {
		cifs_dbg(FYI, "signal is pending after attempt to send\n");
		rc = -EINTR;
	}

	/* uncork it */
	tcp_sock_set_cork(ssocket->sk, false);

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}
459
460 static int
461 smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
462               struct smb_rqst *rqst, int flags)
463 {
464         struct kvec iov;
465         struct smb2_transform_hdr *tr_hdr;
466         struct smb_rqst cur_rqst[MAX_COMPOUND];
467         int rc;
468
469         if (!(flags & CIFS_TRANSFORM_REQ))
470                 return __smb_send_rqst(server, num_rqst, rqst);
471
472         if (num_rqst > MAX_COMPOUND - 1)
473                 return -ENOMEM;
474
475         if (!server->ops->init_transform_rq) {
476                 cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
477                 return -EIO;
478         }
479
480         tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS);
481         if (!tr_hdr)
482                 return -ENOMEM;
483
484         memset(&cur_rqst[0], 0, sizeof(cur_rqst));
485         memset(&iov, 0, sizeof(iov));
486         memset(tr_hdr, 0, sizeof(*tr_hdr));
487
488         iov.iov_base = tr_hdr;
489         iov.iov_len = sizeof(*tr_hdr);
490         cur_rqst[0].rq_iov = &iov;
491         cur_rqst[0].rq_nvec = 1;
492
493         rc = server->ops->init_transform_rq(server, num_rqst + 1,
494                                             &cur_rqst[0], rqst);
495         if (rc)
496                 goto out;
497
498         rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
499         smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
500 out:
501         kfree(tr_hdr);
502         return rc;
503 }
504
/*
 * smb_send - send a single legacy SMB buffer
 * @server:		server to send to
 * @smb_buffer:		buffer starting with the 4-byte RFC1002 length field
 * @smb_buf_length:	length of the SMB payload after those 4 bytes
 *
 * Splits the buffer into a 4-byte header kvec plus a payload kvec and
 * hands it to __smb_send_rqst() as a single request.
 */
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}
520
/*
 * wait_for_free_credits - block until @num_credits credits are available
 * @server:	server whose credits are consumed
 * @num_credits: credits required before proceeding
 * @timeout:	milliseconds to wait; negative means wait indefinitely
 * @flags:	CIFS_OP_MASK selects the credit field; CIFS_TIMEOUT_MASK
 *		selects blocking behavior
 * @instance:	on success, set to the server's reconnect_instance at the
 *		time the credits were taken
 *
 * On success the credits are deducted and in_flight is bumped under
 * req_lock. Returns 0 on success, -EAGAIN when an echo would have to wait,
 * -ENOTSUPP on wait timeout, -ERESTARTSYS if killed, -ENOENT when the
 * server is exiting.
 */
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
		      const int timeout, const int flags,
		      unsigned int *instance)
{
	int rc;
	int *credits;
	int optype;
	long int t;

	/* negative timeout means "wait forever" */
	if (timeout < 0)
		t = MAX_JIFFY_OFFSET;
	else
		t = msecs_to_jiffies(timeout);

	optype = flags & CIFS_OP_MASK;

	*instance = 0;

	credits = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*credits <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;

	spin_lock(&server->req_lock);
	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		if (server->in_flight > server->max_in_flight)
			server->max_in_flight = server->in_flight;
		*credits -= 1;
		*instance = server->reconnect_instance;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits < num_credits) {
			/* not enough credits: sleep until some are returned */
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable_timeout(server->request_q,
				has_credits(server, credits, num_credits), t);
			cifs_num_waiters_dec(server);
			if (!rc) {
				trace_smb3_credit_timeout(server->CurrentMid,
					server->hostname, num_credits);
				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
					 timeout);
				return -ENOTSUPP;
			}
			if (rc == -ERESTARTSYS)
				return -ERESTARTSYS;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * For normal commands, reserve the last MAX_COMPOUND
			 * credits to compound requests.
			 * Otherwise these compounds could be permanently
			 * starved for credits by single-credit requests.
			 *
			 * To prevent spinning CPU, block this thread until
			 * there are >MAX_COMPOUND credits available.
			 * But only do this is we already have a lot of
			 * credits in flight to avoid triggering this check
			 * for servers that are slow to hand out credits on
			 * new sessions.
			 */
			if (!optype && num_credits == 1 &&
			    server->in_flight > 2 * MAX_COMPOUND &&
			    *credits <= MAX_COMPOUND) {
				spin_unlock(&server->req_lock);
				cifs_num_waiters_inc(server);
				rc = wait_event_killable_timeout(
					server->request_q,
					has_credits(server, credits,
						    MAX_COMPOUND + 1),
					t);
				cifs_num_waiters_dec(server);
				if (!rc) {
					trace_smb3_credit_timeout(
						server->CurrentMid,
						server->hostname, num_credits);
					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
						 timeout);
					return -ENOTSUPP;
				}
				if (rc == -ERESTARTSYS)
					return -ERESTARTSYS;
				spin_lock(&server->req_lock);
				/* re-check credit count from the top */
				continue;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
				*credits -= num_credits;
				server->in_flight += num_credits;
				if (server->in_flight > server->max_in_flight)
					server->max_in_flight = server->in_flight;
				*instance = server->reconnect_instance;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}
637
/* Acquire a single credit, waiting indefinitely (timeout -1). */
static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
{
	return wait_for_free_credits(server, 1, -1, flags,
				     instance);
}
645
/*
 * Acquire @num credits for a compound request with a 60 second timeout.
 * Fails fast with -ENOTSUPP if too few requests are in flight to ever
 * return enough credits (waiting would likely deadlock on credits).
 */
static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
			  const int flags, unsigned int *instance)
{
	int *credits;

	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

	spin_lock(&server->req_lock);
	if (*credits < num) {
		/*
		 * Return immediately if not too many requests in flight since
		 * we will likely be stuck on waiting for credits.
		 */
		if (server->in_flight < num - *credits) {
			spin_unlock(&server->req_lock);
			return -ENOTSUPP;
		}
	}
	spin_unlock(&server->req_lock);

	return wait_for_free_credits(server, num, 60000, flags,
				     instance);
}
670
/*
 * SMB1 implementation of the wait_mtu_credits op: no credit accounting,
 * so grant the full requested @size and report zero credits consumed.
 * Always returns 0.
 */
int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, struct cifs_credits *credits)
{
	*num = size;
	credits->value = 0;
	credits->instance = server->reconnect_instance;
	return 0;
}
680
/*
 * Allocate a mid for @in_buf and queue it on the server's pending list.
 *
 * Rejects requests when the transport or session state makes them
 * pointless: -ENOENT if the server is exiting, -EAGAIN if the session
 * needs reconnect or is in a setup/teardown state incompatible with this
 * command, -ENOMEM on allocation failure. On success *ppmidQ is set and
 * 0 is returned.
 */
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	/* only session-setup/negotiate may be sent on a brand-new session */
	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
			(in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}
715
/*
 * Sleep (freezable, killable) until @midQ leaves MID_REQUEST_SUBMITTED,
 * i.e. a response arrived or the mid was otherwise completed.
 * Returns 0 normally, -ERESTARTSYS if the wait was killed.
 */
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
				    midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}
728
/*
 * Build a mid for an async SMB1 request.
 *
 * Validates that iov[0] is exactly the 4-byte RFC1002 length field and is
 * contiguous with iov[1] (the SMB header), enables signing when the server
 * requires it, allocates the mid and signs the request.
 *
 * Returns the mid or an ERR_PTR (-EIO on malformed iovs, -ENOMEM, or the
 * signing error code).
 */
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}
756
/*
 * Send a SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 *
 * If CIFS_HAS_CREDITS is set in @flags, the caller has already obtained
 * a send credit and passes its instance via @exist_credits; otherwise a
 * credit is obtained (and, on failure, returned) here.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags,
		const struct cifs_credits *exist_credits)
{
	int rc;
	struct mid_q_entry *mid;
	struct cifs_credits credits = { .value = 0, .instance = 0 };
	unsigned int instance;
	int optype;

	optype = flags & CIFS_OP_MASK;

	/* obtain a send credit unless the caller already holds one */
	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, flags, &instance);
		if (rc)
			return rc;
		credits.value = 1;
		credits.instance = instance;
	} else
		instance = exist_credits->instance;

	mutex_lock(&server->srv_mutex);

	/*
	 * We can't use credits obtained from the previous session to send this
	 * request. Check if there were reconnects after we obtained credits and
	 * return -EAGAIN in such cases to let callers handle it.
	 */
	if (instance != server->reconnect_instance) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return -EAGAIN;
	}

	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, 1, rqst, flags);
	cifs_in_send_dec(server);

	if (rc < 0) {
		/* send failed: undo the mid allocation and sequence bump */
		revert_current_mid(server, mid->credits);
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	/* on failure return the credit we consumed and wake any waiters */
	add_credits_and_wake_if(server, &credits, optype);
	return rc;
}
838
839 /*
840  *
841  * Send an SMB Request.  No response info (other than return code)
842  * needs to be parsed.
843  *
844  * flags indicate the type of request buffer and how long to wait
845  * and whether to log NT STATUS code (error) before mapping it to POSIX error
846  *
847  */
848 int
849 SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
850                  char *in_buf, int flags)
851 {
852         int rc;
853         struct kvec iov[1];
854         struct kvec rsp_iov;
855         int resp_buf_type;
856
857         iov[0].iov_base = in_buf;
858         iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
859         flags |= CIFS_NO_RSP_BUF;
860         rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
861         cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
862
863         return rc;
864 }
865
/*
 * Reap a mid after its exchange has finished (or been abandoned) and
 * translate its final state into an errno.
 *
 * Returns 0 only for MID_RESPONSE_RECEIVED, in which case the mid is
 * left alive for the caller to consume resp_buf.  In every other case
 * the mid is freed here via DeleteMidQEntry().
 */
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		/* success: caller still owns the mid and its response buf */
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		/*
		 * Unexpected state: dequeue the mid here (if not already
		 * done), since the demultiplex thread will not do it for us.
		 */
		if (!(mid->mid_flags & MID_DELETED)) {
			list_del_init(&mid->qhead);
			mid->mid_flags |= MID_DELETED;
		}
		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}
902
903 static inline int
904 send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
905             struct mid_q_entry *mid)
906 {
907         return server->ops->send_cancel ?
908                                 server->ops->send_cancel(server, rqst, mid) : 0;
909 }
910
911 int
912 cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
913                    bool log_error)
914 {
915         unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
916
917         dump_smb(mid->resp_buf, min_t(u32, 92, len));
918
919         /* convert the length into a more usable form */
920         if (server->sign) {
921                 struct kvec iov[2];
922                 int rc = 0;
923                 struct smb_rqst rqst = { .rq_iov = iov,
924                                          .rq_nvec = 2 };
925
926                 iov[0].iov_base = mid->resp_buf;
927                 iov[0].iov_len = 4;
928                 iov[1].iov_base = (char *)mid->resp_buf + 4;
929                 iov[1].iov_len = len - 4;
930                 /* FIXME: add code to kill session */
931                 rc = cifs_verify_signature(&rqst, server,
932                                            mid->sequence_number);
933                 if (rc)
934                         cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
935                                  rc);
936         }
937
938         /* BB special case reconnect tid and uid here? */
939         return map_smb_to_linux_error(mid->resp_buf, log_error);
940 }
941
942 struct mid_q_entry *
943 cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
944                    struct smb_rqst *rqst)
945 {
946         int rc;
947         struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
948         struct mid_q_entry *mid;
949
950         if (rqst->rq_iov[0].iov_len != 4 ||
951             rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
952                 return ERR_PTR(-EIO);
953
954         rc = allocate_mid(ses, hdr, &mid);
955         if (rc)
956                 return ERR_PTR(rc);
957         rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
958         if (rc) {
959                 cifs_delete_mid(mid);
960                 return ERR_PTR(rc);
961         }
962         return mid;
963 }
964
965 static void
966 cifs_compound_callback(struct mid_q_entry *mid)
967 {
968         struct TCP_Server_Info *server = mid->server;
969         struct cifs_credits credits;
970
971         credits.value = server->ops->get_credits(mid);
972         credits.instance = server->reconnect_instance;
973
974         add_credits(server, &credits, mid->optype);
975 }
976
/*
 * Callback for the final PDU of a compound chain: collect granted
 * credits like every other part, then wake the thread waiting in
 * compound_send_recv().
 */
static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}
983
/*
 * Callback installed on a mid whose waiter gave up: when the response
 * eventually arrives, reclaim the granted credits and free the mid here
 * since no thread is waiting to reap it.
 */
static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	DeleteMidQEntry(mid);
}
990
991 /*
992  * Return a channel (master if none) of @ses that can be used to send
993  * regular requests.
994  *
995  * If we are currently binding a new channel (negprot/sess.setup),
996  * return the new incomplete channel.
997  */
998 struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
999 {
1000         uint index = 0;
1001
1002         if (!ses)
1003                 return NULL;
1004
1005         if (!ses->binding) {
1006                 /* round robin */
1007                 if (ses->chan_count > 1) {
1008                         index = (uint)atomic_inc_return(&ses->chan_seq);
1009                         index %= ses->chan_count;
1010                 }
1011                 return ses->chans[index].server;
1012         } else {
1013                 return cifs_ses_server(ses);
1014         }
1015 }
1016
/*
 * Send @num_rqst requests as one compound chain on @server and wait for
 * all the responses.
 *
 * On success resp_iov[i] points at response i (caller must free the
 * buffers, whose kinds are reported in resp_buf_type[i]).  Handles
 * credit accounting, signing/send ordering, cancelled waits and the
 * SMB3.1.1 preauth hash during session establishment.
 */
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   struct TCP_Server_Info *server,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, optype, rc = 0;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	char *buf;

	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if (!ses || !ses->server || !server) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Wait for all the requests to become available.
	 * This approach still leaves the possibility to be stuck waiting for
	 * credits if the server doesn't grant credits to the outstanding
	 * requests and if the client is completely idle, not generating any
	 * other requests.
	 * This can be handled by the eventual session reconnect.
	 */
	rc = wait_for_compound_request(server, num_rqst, flags,
				       &instance);
	if (rc)
		return rc;

	/* one credit (of the instance just obtained) per chained request */
	for (i = 0; i < num_rqst; i++) {
		credits[i].value = 1;
		credits[i].instance = instance;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&server->srv_mutex);

	/*
	 * All the parts of the compound chain must use credits obtained from
	 * the same session. We can not use credits obtained from the previous
	 * session to send this request. Check if there were reconnects after
	 * we obtained credits and return -EAGAIN in such cases to let callers
	 * handle it.
	 */
	if (instance != server->reconnect_instance) {
		mutex_unlock(&server->srv_mutex);
		for (j = 0; j < num_rqst; j++)
			add_credits(server, &credits[j], optype);
		return -EAGAIN;
	}

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			/* undo the mids allocated so far and return credits */
			revert_current_mid(server, i);
			for (j = 0; j < i; j++)
				cifs_delete_mid(midQ[j]);
			mutex_unlock(&server->srv_mutex);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, num_rqst, rqst, flags);
	cifs_in_send_dec(server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		/* send failed: undo mid allocations and sequence bump */
		revert_current_mid(server, num_rqst);
		server->sequence_number -= 2;
	}

	mutex_unlock(&server->srv_mutex);

	/*
	 * If sending failed for some reason or it is an oplock break that we
	 * will not receive a response to - return credits back
	 */
	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
		for (i = 0; i < num_rqst; i++)
			add_credits(server, &credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
		smb311_update_preauth_hash(ses, rqst[0].rq_iov,
					   rqst[0].rq_nvec);

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		/* wait was interrupted: cancel the unanswered parts */
		for (; i < num_rqst; i++) {
			cifs_server_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(server, &rqst[i], midQ[i]);
			spin_lock(&GlobalMid_Lock);
			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				/*
				 * Still in flight: hand the mid (and its
				 * credit) over to cifs_cancelled_callback,
				 * which will free it when the late response
				 * arrives.
				 */
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&GlobalMid_Lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			server->vals->header_preamble_size;

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = server->ops->check_receive(midQ[i], server,
						     flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by cifs_delete_mid */
		if ((flags & CIFS_NO_RSP_BUF) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		smb311_update_preauth_hash(ses, &iov, 1);
	}

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			cifs_delete_mid(midQ[i]);
	}

	return rc;
}
1233
/*
 * Convenience wrapper around compound_send_recv() for the common case
 * of a single (non-compounded) request.
 */
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct TCP_Server_Info *server,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	int rc;

	rc = compound_send_recv(xid, ses, server, flags, 1, rqst,
				resp_buf_type, resp_iov);
	return rc;
}
1243
1244 int
1245 SendReceive2(const unsigned int xid, struct cifs_ses *ses,
1246              struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
1247              const int flags, struct kvec *resp_iov)
1248 {
1249         struct smb_rqst rqst;
1250         struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
1251         int rc;
1252
1253         if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
1254                 new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
1255                                         GFP_KERNEL);
1256                 if (!new_iov) {
1257                         /* otherwise cifs_send_recv below sets resp_buf_type */
1258                         *resp_buf_type = CIFS_NO_BUFFER;
1259                         return -ENOMEM;
1260                 }
1261         } else
1262                 new_iov = s_iov;
1263
1264         /* 1st iov is a RFC1001 length followed by the rest of the packet */
1265         memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
1266
1267         new_iov[0].iov_base = new_iov[1].iov_base;
1268         new_iov[0].iov_len = 4;
1269         new_iov[1].iov_base += 4;
1270         new_iov[1].iov_len -= 4;
1271
1272         memset(&rqst, 0, sizeof(struct smb_rqst));
1273         rqst.rq_iov = new_iov;
1274         rqst.rq_nvec = n_vec + 1;
1275
1276         rc = cifs_send_recv(xid, ses, ses->server,
1277                             &rqst, resp_buf_type, flags, resp_iov);
1278         if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
1279                 kfree(new_iov);
1280         return rc;
1281 }
1282
/*
 * Send the single SMB1 request in @in_buf and copy the complete
 * response frame into @out_buf.  *pbytes_returned is set to the RFC1002
 * length of the response.  Blocks (uninterruptibly, via
 * wait_for_response) until the response arrives or the exchange is
 * cancelled.
 */
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };
	struct TCP_Server_Info *server;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	server = ses->server;
	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
				len);
		return -EIO;
	}

	rc = wait_for_free_request(server, flags, &credits.instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	/* send failed: undo the sequence number bump done when signing */
	if (rc < 0)
		server->sequence_number -= 2;

	mutex_unlock(&server->srv_mutex);

	if (rc < 0)
		goto out;

	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		send_cancel(server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			/* ownership of the mid passes to the callback */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		/* mid already freed by cifs_sync_mid_result on error */
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	/* copy the whole frame (4-byte length header + body) to the caller */
	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}
1394
1395 /* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
1396    blocking lock to return. */
1397
1398 static int
1399 send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
1400                         struct smb_hdr *in_buf,
1401                         struct smb_hdr *out_buf)
1402 {
1403         int bytes_returned;
1404         struct cifs_ses *ses = tcon->ses;
1405         LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
1406
1407         /* We just modify the current in_buf to change
1408            the type of lock from LOCKING_ANDX_SHARED_LOCK
1409            or LOCKING_ANDX_EXCLUSIVE_LOCK to
1410            LOCKING_ANDX_CANCEL_LOCK. */
1411
1412         pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
1413         pSMB->Timeout = 0;
1414         pSMB->hdr.Mid = get_next_mid(ses->server);
1415
1416         return SendReceive(xid, ses, in_buf, out_buf,
1417                         &bytes_returned, 0);
1418 }
1419
/*
 * Send an SMB1 blocking-lock request.  Unlike SendReceive() this waits
 * interruptibly; if a signal arrives while the lock is pending, the
 * appropriate cancel is sent (NT_CANCEL for Trans2/POSIX locks,
 * LOCKINGX_CANCEL_LOCK for Windows locks) and, if the cancelled lock
 * turns out to have been granted (-EACCES on the cancel), the syscall
 * is restarted by returning -ERESTARTSYS.
 */
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
			      len);
		return -EIO;
	}

	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	/* send failed: undo the sequence number bump done when signing */
	if (rc < 0)
		server->sequence_number -= 2;

	mutex_unlock(&server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((server->tcpStatus == CifsGood) ||
		 (server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		/* wait (uninterruptibly now) for the server's reply */
		rc = wait_for_response(server, midQ);
		if (rc) {
			send_cancel(server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				/* ownership of the mid passes to the callback */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_tcon_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	/* copy the whole frame (4-byte length header + body) to the caller */
	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	/* -EACCES after a cancel means the lock was granted: retry syscall */
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}