/*
 *   fs/cifs/transport.c
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

void
cifs_wake_up_task(struct mid_q_entry *mid)
{
        wake_up_process(mid->callback_data);
}

struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
        struct mid_q_entry *temp;

        if (server == NULL) {
                cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
                return NULL;
        }

        temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
        memset(temp, 0, sizeof(struct mid_q_entry));
        kref_init(&temp->refcount);
        temp->mid = get_mid(smb_buffer);
        temp->pid = current->pid;
        temp->command = cpu_to_le16(smb_buffer->Command);
        cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
        /*      do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
        /* when mid allocated can be before when sent */
        temp->when_alloc = jiffies;
        temp->server = server;

        /*
         * The default is for the mid to be synchronous, so the
         * default callback just wakes up the current task.
         */
        get_task_struct(current);
        temp->creator = current;
        temp->callback = cifs_wake_up_task;
        temp->callback_data = current;

        atomic_inc(&midCount);
        temp->mid_state = MID_REQUEST_ALLOCATED;
        return temp;
}
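/*
 * Illustrative sketch (not part of the driver) of the synchronous pattern
 * that the default callback above enables; it mirrors the flow of
 * SendReceive() later in this file:
 *
 *      mid = AllocMidQEntry(hdr, server);
 *      // mid->callback == cifs_wake_up_task, mid->callback_data == current
 *      rc = smb_send(server, hdr, len);
 *      rc = wait_for_response(server, mid);
 *      // sleeps until the demultiplex thread settles the mid and runs
 *      // mid->callback, i.e. wake_up_process(current)
 */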

static void _cifs_mid_q_entry_release(struct kref *refcount)
{
        struct mid_q_entry *midEntry =
                        container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
        __le16 command = midEntry->server->vals->lock_cmd;
        __u16 smb_cmd = le16_to_cpu(midEntry->command);
        unsigned long now;
        unsigned long roundtrip_time;
#endif
        struct TCP_Server_Info *server = midEntry->server;

        if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
            midEntry->mid_state == MID_RESPONSE_RECEIVED &&
            server->ops->handle_cancelled_mid)
                server->ops->handle_cancelled_mid(midEntry->resp_buf, server);

        midEntry->mid_state = MID_FREE;
        atomic_dec(&midCount);
        if (midEntry->large_buf)
                cifs_buf_release(midEntry->resp_buf);
        else
                cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
        now = jiffies;
        if (now < midEntry->when_alloc)
                cifs_server_dbg(VFS, "invalid mid allocation time\n");
        roundtrip_time = now - midEntry->when_alloc;

        if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
                if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
                        server->slowest_cmd[smb_cmd] = roundtrip_time;
                        server->fastest_cmd[smb_cmd] = roundtrip_time;
                } else {
                        if (server->slowest_cmd[smb_cmd] < roundtrip_time)
                                server->slowest_cmd[smb_cmd] = roundtrip_time;
                        else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
                                server->fastest_cmd[smb_cmd] = roundtrip_time;
                }
                cifs_stats_inc(&server->num_cmds[smb_cmd]);
                server->time_per_cmd[smb_cmd] += roundtrip_time;
        }
        /*
         * Commands taking longer than one second (the default) can indicate
         * that something is wrong, unless it is quite a slow link or a very
         * busy server. Note that this calculation is unlikely or impossible
         * to wrap as long as slow_rsp_threshold is not set far above the
         * recommended maximum value (32767, i.e. 9 hours), and is generally
         * harmless even if wrong since it only affects debug counters - so
         * we leave the calculation as a simple comparison rather than doing
         * multiple conversions and overflow checks.
         */
        if ((slow_rsp_threshold != 0) &&
            time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
            (midEntry->command != command)) {
                /*
                 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
                 * NB: le16_to_cpu returns unsigned so it cannot be negative below
                 */
                if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
                        cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

                trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
                               midEntry->when_sent, midEntry->when_received);
                if (cifsFYI & CIFS_TIMER) {
                        pr_debug(" CIFS slow rsp: cmd %d mid %llu",
                               midEntry->command, midEntry->mid);
                        cifs_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
                               now - midEntry->when_alloc,
                               now - midEntry->when_sent,
                               now - midEntry->when_received);
                }
        }
#endif
        put_task_struct(midEntry->creator);

        mempool_free(midEntry, cifs_mid_poolp);
}
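/*
 * Worked example for the slow-response check above (a sketch, assuming the
 * default slow_rsp_threshold of 1 second and HZ == 1000):
 *
 *      when_alloc = 5000 jiffies, now = 6500 jiffies
 *      time_after(6500, 5000 + 1 * 1000) is true, so the command is
 *      counted in smb2slowcmd[] and traced via trace_smb3_slow_rsp()
 *
 * The recommended maximum of 32767 seconds mentioned above is
 * 32767 / 3600 ~= 9.1 hours, which is why the overflow risk is called
 * harmless.
 */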

void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
        spin_lock(&GlobalMid_Lock);
        kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
        spin_unlock(&GlobalMid_Lock);
}

void DeleteMidQEntry(struct mid_q_entry *midEntry)
{
        cifs_mid_q_entry_release(midEntry);
}

void
cifs_delete_mid(struct mid_q_entry *mid)
{
        spin_lock(&GlobalMid_Lock);
        if (!(mid->mid_flags & MID_DELETED)) {
                list_del_init(&mid->qhead);
                mid->mid_flags |= MID_DELETED;
        }
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:     Server to send the data to
 * @smb_msg:    Message to send
 * @sent:       amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
              size_t *sent)
{
        int rc = 0;
        int retries = 0;
        struct socket *ssocket = server->ssocket;

        *sent = 0;

        smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
        smb_msg->msg_namelen = sizeof(struct sockaddr);
        smb_msg->msg_control = NULL;
        smb_msg->msg_controllen = 0;
        if (server->noblocksnd)
                smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
        else
                smb_msg->msg_flags = MSG_NOSIGNAL;

        while (msg_data_left(smb_msg)) {
                /*
                 * If this is a blocking send, we try 3 times, since each can
                 * block for 5 seconds. For a nonblocking send we have to try
                 * more times, but wait increasing amounts of time to allow
                 * the socket to clear. The overall time we wait in either
                 * case to send on the socket is about 15 seconds.
                 * Similarly we wait for 15 seconds for a response from the
                 * server in SendReceive[2] for the server to send a response
                 * back for most types of requests (except SMB Write past the
                 * end of file, which can be slow, and blocking lock
                 * operations). NFS waits slightly longer than CIFS, but this
                 * can make it take longer for nonresponsive servers to be
                 * detected, and 15 seconds is more than enough time for
                 * modern networks to send a packet. In most cases if we fail
                 * to send after the retries we will kill the socket and
                 * reconnect, which may clear the network problem.
                 */
                rc = sock_sendmsg(ssocket, smb_msg);
                if (rc == -EAGAIN) {
                        retries++;
                        if (retries >= 14 ||
                            (!server->noblocksnd && (retries > 2))) {
                                cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
                                         ssocket);
                                return -EAGAIN;
                        }
                        msleep(1 << retries);
                        continue;
                }

                if (rc < 0)
                        return rc;

                if (rc == 0) {
                        /* should never happen, letting socket clear before
                           retrying is our only obvious option here */
                        cifs_server_dbg(VFS, "tcp sent no data\n");
                        msleep(500);
                        continue;
                }

                /* send was at least partially successful */
                *sent += rc;
                retries = 0; /* in case we get ENOSPC on the next send */
        }
        return 0;
}
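/*
 * Worked example of the nonblocking backoff above: after each -EAGAIN the
 * thread sleeps 1 << retries milliseconds, so before giving up at
 * retries >= 14 it has waited roughly
 *
 *      2 + 4 + 8 + ... + 8192 ms = 2^14 - 2 ms ~= 16.4 seconds
 *
 * which matches the "about 15 seconds" described in the comment. In the
 * blocking case (noblocksnd unset) each sock_sendmsg() can itself block
 * for several seconds, so only 3 attempts are allowed.
 */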

unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        unsigned int i;
        struct kvec *iov;
        int nvec;
        unsigned long buflen = 0;

        if (server->vals->header_preamble_size == 0 &&
            rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
                iov = &rqst->rq_iov[1];
                nvec = rqst->rq_nvec - 1;
        } else {
                iov = rqst->rq_iov;
                nvec = rqst->rq_nvec;
        }

        /* total up iov array first */
        for (i = 0; i < nvec; i++)
                buflen += iov[i].iov_len;

        /*
         * Add in the page array if there is one. The caller needs to make
         * sure rq_offset and rq_tailsz are set correctly. If a buffer of
         * multiple pages ends at page boundary, rq_tailsz needs to be set to
         * PAGE_SIZE.
         */
        if (rqst->rq_npages) {
                if (rqst->rq_npages == 1)
                        buflen += rqst->rq_tailsz;
                else {
                        /*
                         * If there is more than one page, calculate the
                         * buffer length based on rq_offset and rq_tailsz
                         */
                        buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
                                        rqst->rq_offset;
                        buflen += rqst->rq_tailsz;
                }
        }

        return buflen;
}
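/*
 * Worked example for the page-array math above (hypothetical numbers): a
 * request with rq_npages = 3, rq_pagesz = 4096, rq_offset = 512 and
 * rq_tailsz = 100 describes data starting 512 bytes into the first page
 * and ending 100 bytes into the last, so
 *
 *      buflen += 4096 * (3 - 1) - 512; // 7680 bytes in the first 2 pages
 *      buflen += 100;                  // tail bytes in the last page
 *
 * adding 7780 bytes of page data on top of the kvec totals.
 */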

static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
                struct smb_rqst *rqst)
{
        int rc = 0;
        struct kvec *iov;
        int n_vec;
        unsigned int send_length = 0;
        unsigned int i, j;
        sigset_t mask, oldmask;
        size_t total_len = 0, sent, size;
        struct socket *ssocket = server->ssocket;
        struct msghdr smb_msg;
        int val = 1;
        __be32 rfc1002_marker;

        if (cifs_rdma_enabled(server)) {
                /* return -EAGAIN when connecting or reconnecting */
                rc = -EAGAIN;
                if (server->smbd_conn)
                        rc = smbd_send(server, num_rqst, rqst);
                goto smbd_done;
        }

        if (ssocket == NULL)
                return -EAGAIN;

        if (signal_pending(current)) {
                cifs_dbg(FYI, "signal is pending before sending any data\n");
                return -EINTR;
        }

        /* cork the socket */
        kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
                                (char *)&val, sizeof(val));

        for (j = 0; j < num_rqst; j++)
                send_length += smb_rqst_len(server, &rqst[j]);
        rfc1002_marker = cpu_to_be32(send_length);

        /*
         * We should not allow signals to interrupt the network send because
         * any partial send will cause session reconnects, thus increasing
         * latency of system calls and overloading the server with
         * unnecessary requests.
         */

        sigfillset(&mask);
        sigprocmask(SIG_BLOCK, &mask, &oldmask);

        /* Generate a rfc1002 marker for SMB2+ */
        if (server->vals->header_preamble_size == 0) {
                struct kvec hiov = {
                        .iov_base = &rfc1002_marker,
                        .iov_len  = 4
                };
                iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;
                send_length += 4;
        }

        cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

        for (j = 0; j < num_rqst; j++) {
                iov = rqst[j].rq_iov;
                n_vec = rqst[j].rq_nvec;

                size = 0;
                for (i = 0; i < n_vec; i++) {
                        dump_smb(iov[i].iov_base, iov[i].iov_len);
                        size += iov[i].iov_len;
                }

                iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;

                /* now walk the page array and send each page in it */
                for (i = 0; i < rqst[j].rq_npages; i++) {
                        struct bio_vec bvec;

                        bvec.bv_page = rqst[j].rq_pages[i];
                        rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
                                             &bvec.bv_offset);

                        iov_iter_bvec(&smb_msg.msg_iter, WRITE,
                                      &bvec, 1, bvec.bv_len);
                        rc = smb_send_kvec(server, &smb_msg, &sent);
                        if (rc < 0)
                                break;

                        total_len += sent;
                }
        }

unmask:
        sigprocmask(SIG_SETMASK, &oldmask, NULL);

        /*
         * If a signal is pending but we have already sent the whole packet
         * to the server, we need to return success status to allow the
         * corresponding mid entry to be kept in the pending requests queue,
         * thus allowing the client to handle responses from the server.
         *
         * If only part of the packet has been sent, there is no need to hide
         * the interrupt because the session will be reconnected anyway, so
         * there won't be any response from the server to handle.
         */

        if (signal_pending(current) && (total_len != send_length)) {
                cifs_dbg(FYI, "signal is pending after attempt to send\n");
                rc = -EINTR;
        }

        /* uncork it */
        val = 0;
        kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
                                (char *)&val, sizeof(val));

        if ((total_len > 0) && (total_len != send_length)) {
                cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
                         send_length, total_len);
                /*
                 * If we have only sent part of an SMB then the next SMB could
                 * be taken as the remainder of this one. We need to kill the
                 * socket so the server throws away the partial SMB.
                 */
                server->tcpStatus = CifsNeedReconnect;
                trace_smb3_partial_send_reconnect(server->CurrentMid,
                                                  server->hostname);
        }
smbd_done:
        if (rc < 0 && rc != -EINTR)
                cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
                         rc);
        else if (rc > 0)
                rc = 0;

        return rc;
}
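/*
 * Framing sketch for the marker generated above: for SMB2+ (i.e. when
 * header_preamble_size == 0) a send_length of 0x184 produces
 *
 *      rfc1002_marker = cpu_to_be32(0x184);    // bytes 00 00 01 84
 *
 * which is sent as its own 4-byte kvec ahead of the request iovs and page
 * data so the server can delimit the PDU on the TCP stream. A partial send
 * after this point would leave the stream unparseable, which is why the
 * code above forces CifsNeedReconnect rather than retrying.
 */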

static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
              struct smb_rqst *rqst, int flags)
{
        struct kvec iov;
        struct smb2_transform_hdr tr_hdr;
        struct smb_rqst cur_rqst[MAX_COMPOUND];
        int rc;

        if (!(flags & CIFS_TRANSFORM_REQ))
                return __smb_send_rqst(server, num_rqst, rqst);

        if (num_rqst > MAX_COMPOUND - 1)
                return -ENOMEM;

        memset(&cur_rqst[0], 0, sizeof(cur_rqst));
        memset(&iov, 0, sizeof(iov));
        memset(&tr_hdr, 0, sizeof(tr_hdr));

        iov.iov_base = &tr_hdr;
        iov.iov_len = sizeof(tr_hdr);
        cur_rqst[0].rq_iov = &iov;
        cur_rqst[0].rq_nvec = 1;

        if (!server->ops->init_transform_rq) {
                cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
                return -EIO;
        }

        rc = server->ops->init_transform_rq(server, num_rqst + 1,
                                            &cur_rqst[0], rqst);
        if (rc)
                return rc;

        rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
        smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
        return rc;
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
         unsigned int smb_buf_length)
{
        struct kvec iov[2];
        struct smb_rqst rqst = { .rq_iov = iov,
                                 .rq_nvec = 2 };

        iov[0].iov_base = smb_buffer;
        iov[0].iov_len = 4;
        iov[1].iov_base = (char *)smb_buffer + 4;
        iov[1].iov_len = smb_buf_length;

        return __smb_send_rqst(server, 1, &rqst);
}

static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
                      const int timeout, const int flags,
                      unsigned int *instance)
{
        int rc;
        int *credits;
        int optype;
        long int t;

        if (timeout < 0)
                t = MAX_JIFFY_OFFSET;
        else
                t = msecs_to_jiffies(timeout);

        optype = flags & CIFS_OP_MASK;

        *instance = 0;

        credits = server->ops->get_credits_field(server, optype);
        /* Since an echo is already inflight, no need to wait to send another */
        if (*credits <= 0 && optype == CIFS_ECHO_OP)
                return -EAGAIN;

        spin_lock(&server->req_lock);
        if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
                /* oplock breaks must not be held up */
                server->in_flight++;
                if (server->in_flight > server->max_in_flight)
                        server->max_in_flight = server->in_flight;
                *credits -= 1;
                *instance = server->reconnect_instance;
                spin_unlock(&server->req_lock);
                return 0;
        }

        while (1) {
                if (*credits < num_credits) {
                        spin_unlock(&server->req_lock);
                        cifs_num_waiters_inc(server);
                        rc = wait_event_killable_timeout(server->request_q,
                                has_credits(server, credits, num_credits), t);
                        cifs_num_waiters_dec(server);
                        if (!rc) {
                                trace_smb3_credit_timeout(server->CurrentMid,
                                        server->hostname, num_credits);
                                cifs_server_dbg(VFS, "wait timed out after %d ms\n",
                                         timeout);
                                return -ENOTSUPP;
                        }
                        if (rc == -ERESTARTSYS)
                                return -ERESTARTSYS;
                        spin_lock(&server->req_lock);
                } else {
                        if (server->tcpStatus == CifsExiting) {
                                spin_unlock(&server->req_lock);
                                return -ENOENT;
                        }

                        /*
                         * For normal commands, reserve the last MAX_COMPOUND
                         * credits for compound requests.
                         * Otherwise these compounds could be permanently
                         * starved for credits by single-credit requests.
                         *
                         * To prevent spinning CPU, block this thread until
                         * there are >MAX_COMPOUND credits available.
                         * But only do this if we already have a lot of
                         * credits in flight to avoid triggering this check
                         * for servers that are slow to hand out credits on
                         * new sessions.
                         */
                        if (!optype && num_credits == 1 &&
                            server->in_flight > 2 * MAX_COMPOUND &&
                            *credits <= MAX_COMPOUND) {
                                spin_unlock(&server->req_lock);
                                cifs_num_waiters_inc(server);
                                rc = wait_event_killable_timeout(
                                        server->request_q,
                                        has_credits(server, credits,
                                                    MAX_COMPOUND + 1),
                                        t);
                                cifs_num_waiters_dec(server);
                                if (!rc) {
                                        trace_smb3_credit_timeout(
                                                server->CurrentMid,
                                                server->hostname, num_credits);
                                        cifs_server_dbg(VFS, "wait timed out after %d ms\n",
                                                 timeout);
                                        return -ENOTSUPP;
                                }
                                if (rc == -ERESTARTSYS)
                                        return -ERESTARTSYS;
                                spin_lock(&server->req_lock);
                                continue;
                        }

                        /*
                         * Cannot count locking commands against the total
                         * as they are allowed to block on the server.
                         */

                        /* update # of requests on the wire to server */
                        if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
                                *credits -= num_credits;
                                server->in_flight += num_credits;
                                if (server->in_flight > server->max_in_flight)
                                        server->max_in_flight = server->in_flight;
                                *instance = server->reconnect_instance;
                        }
                        spin_unlock(&server->req_lock);
                        break;
                }
        }
        return 0;
}
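/*
 * Worked example of the credit reservation above, assuming MAX_COMPOUND
 * is 5 as defined in cifsglob.h: a normal single-credit request
 * (optype == 0, num_credits == 1) arriving while
 *
 *      server->in_flight == 12 (> 2 * MAX_COMPOUND)
 *      *credits == 4           (<= MAX_COMPOUND)
 *
 * blocks until has_credits(server, credits, MAX_COMPOUND + 1) holds, i.e.
 * until at least 6 credits are available, leaving the last 5 for compound
 * requests. An oplock break (CIFS_NON_BLOCKING) skips all of this and is
 * charged a credit immediately.
 */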

static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
                      unsigned int *instance)
{
        return wait_for_free_credits(server, 1, -1, flags,
                                     instance);
}

static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
                          const int flags, unsigned int *instance)
{
        int *credits;

        credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

        spin_lock(&server->req_lock);
        if (*credits < num) {
                /*
                 * Return immediately if there are not too many requests in
                 * flight, since we would likely be stuck waiting for credits.
                 */
                if (server->in_flight < num - *credits) {
                        spin_unlock(&server->req_lock);
                        return -ENOTSUPP;
                }
        }
        spin_unlock(&server->req_lock);

        return wait_for_free_credits(server, num, 60000, flags,
                                     instance);
}

int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
                      unsigned int *num, struct cifs_credits *credits)
{
        *num = size;
        credits->value = 0;
        credits->instance = server->reconnect_instance;
        return 0;
}

static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
                        struct mid_q_entry **ppmidQ)
{
        if (ses->server->tcpStatus == CifsExiting) {
                return -ENOENT;
        }

        if (ses->server->tcpStatus == CifsNeedReconnect) {
                cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
                return -EAGAIN;
        }

        if (ses->status == CifsNew) {
                if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
                        (in_buf->Command != SMB_COM_NEGOTIATE))
                        return -EAGAIN;
                /* else ok - we are setting up session */
        }

        if (ses->status == CifsExiting) {
                /* check if SMB session is bad because we are setting it up */
                if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
                        return -EAGAIN;
                /* else ok - we are shutting down session */
        }

        *ppmidQ = AllocMidQEntry(in_buf, ses->server);
        if (*ppmidQ == NULL)
                return -ENOMEM;
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);
        return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
        int error;

        error = wait_event_freezekillable_unsafe(server->response_q,
                                    midQ->mid_state != MID_REQUEST_SUBMITTED);
        if (error < 0)
                return -ERESTARTSYS;

        return 0;
}

struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        /* enable signing if server requires it */
        if (server->sign)
                hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

        mid = AllocMidQEntry(hdr, server);
        if (mid == NULL)
                return ERR_PTR(-ENOMEM);

        rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
        if (rc) {
                DeleteMidQEntry(mid);
                return ERR_PTR(rc);
        }

        return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. The caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
                mid_receive_t *receive, mid_callback_t *callback,
                mid_handle_t *handle, void *cbdata, const int flags,
                const struct cifs_credits *exist_credits)
{
        int rc;
        struct mid_q_entry *mid;
        struct cifs_credits credits = { .value = 0, .instance = 0 };
        unsigned int instance;
        int optype;

        optype = flags & CIFS_OP_MASK;

        if ((flags & CIFS_HAS_CREDITS) == 0) {
                rc = wait_for_free_request(server, flags, &instance);
                if (rc)
                        return rc;
                credits.value = 1;
                credits.instance = instance;
        } else
                instance = exist_credits->instance;

        mutex_lock(&server->srv_mutex);

        /*
         * We can't use credits obtained from the previous session to send this
         * request. Check if there were reconnects after we obtained credits and
         * return -EAGAIN in such cases to let callers handle it.
         */
        if (instance != server->reconnect_instance) {
                mutex_unlock(&server->srv_mutex);
                add_credits_and_wake_if(server, &credits, optype);
                return -EAGAIN;
        }

        mid = server->ops->setup_async_request(server, rqst);
        if (IS_ERR(mid)) {
                mutex_unlock(&server->srv_mutex);
                add_credits_and_wake_if(server, &credits, optype);
                return PTR_ERR(mid);
        }

        mid->receive = receive;
        mid->callback = callback;
        mid->callback_data = cbdata;
        mid->handle = handle;
        mid->mid_state = MID_REQUEST_SUBMITTED;

        /* put it on the pending_mid_q */
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&mid->qhead, &server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);

        /*
         * Need to store the time in mid before calling I/O. For call_async,
         * I/O response may come back and free the mid entry on another thread.
         */
        cifs_save_when_sent(mid);
        cifs_in_send_inc(server);
        rc = smb_send_rqst(server, 1, rqst, flags);
        cifs_in_send_dec(server);

        if (rc < 0) {
                revert_current_mid(server, mid->credits);
                server->sequence_number -= 2;
                cifs_delete_mid(mid);
        }

        mutex_unlock(&server->srv_mutex);

        if (rc == 0)
                return 0;

        add_credits_and_wake_if(server, &credits, optype);
        return rc;
}
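/*
 * Usage sketch for cifs_call_async() (illustrative only - the callback,
 * context and completion names here are hypothetical, but the shape
 * mirrors how SMB2 echo issues a fire-and-forget request):
 *
 *      static void example_callback(struct mid_q_entry *mid)
 *      {
 *              struct example_ctx *ctx = mid->callback_data;
 *
 *              // runs in the demultiplex thread once a response (or a
 *              // reconnect) settles the mid; it must release the mid
 *              complete(&ctx->done);
 *              DeleteMidQEntry(mid);
 *      }
 *
 *      rc = cifs_call_async(server, &rqst, NULL, example_callback,
 *                           NULL, ctx, CIFS_ECHO_OP, NULL);
 */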

/*
 * Send an SMB Request.  No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait,
 * and whether to log the NT STATUS code (error) before mapping it to a
 * POSIX error.
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
                 char *in_buf, int flags)
{
        int rc;
        struct kvec iov[1];
        struct kvec rsp_iov;
        int resp_buf_type;

        iov[0].iov_base = in_buf;
        iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
        flags |= CIFS_NO_RSP_BUF;
        rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
        cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

        return rc;
}

static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
        int rc = 0;

        cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
                 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

        spin_lock(&GlobalMid_Lock);
        switch (mid->mid_state) {
        case MID_RESPONSE_RECEIVED:
                spin_unlock(&GlobalMid_Lock);
                return rc;
        case MID_RETRY_NEEDED:
                rc = -EAGAIN;
                break;
        case MID_RESPONSE_MALFORMED:
                rc = -EIO;
                break;
        case MID_SHUTDOWN:
                rc = -EHOSTDOWN;
                break;
        default:
                if (!(mid->mid_flags & MID_DELETED)) {
                        list_del_init(&mid->qhead);
                        mid->mid_flags |= MID_DELETED;
                }
                cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
                         __func__, mid->mid, mid->mid_state);
                rc = -EIO;
        }
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
        return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
            struct mid_q_entry *mid)
{
        return server->ops->send_cancel ?
                                server->ops->send_cancel(server, rqst, mid) : 0;
}

int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
                   bool log_error)
{
        unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

        dump_smb(mid->resp_buf, min_t(u32, 92, len));

        /* convert the length into a more usable form */
        if (server->sign) {
                struct kvec iov[2];
                int rc = 0;
                struct smb_rqst rqst = { .rq_iov = iov,
                                         .rq_nvec = 2 };

                iov[0].iov_base = mid->resp_buf;
                iov[0].iov_len = 4;
                iov[1].iov_base = (char *)mid->resp_buf + 4;
                iov[1].iov_len = len - 4;
                /* FIXME: add code to kill session */
                rc = cifs_verify_signature(&rqst, server,
                                           mid->sequence_number);
                if (rc)
                        cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
                                 rc);
        }

        /* BB special case reconnect tid and uid here? */
        return map_smb_to_linux_error(mid->resp_buf, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
                   struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        rc = allocate_mid(ses, hdr, &mid);
        if (rc)
                return ERR_PTR(rc);
        rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
        if (rc) {
                cifs_delete_mid(mid);
                return ERR_PTR(rc);
        }
        return mid;
}

static void
cifs_compound_callback(struct mid_q_entry *mid)
{
        struct TCP_Server_Info *server = mid->server;
        struct cifs_credits credits;

        credits.value = server->ops->get_credits(mid);
        credits.instance = server->reconnect_instance;

        add_credits(server, &credits, mid->optype);
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        DeleteMidQEntry(mid);
}

int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
                   const int flags, const int num_rqst, struct smb_rqst *rqst,
                   int *resp_buf_type, struct kvec *resp_iov)
{
        int i, j, optype, rc = 0;
        struct mid_q_entry *midQ[MAX_COMPOUND];
        bool cancelled_mid[MAX_COMPOUND] = {false};
        struct cifs_credits credits[MAX_COMPOUND] = {
                { .value = 0, .instance = 0 }
        };
        unsigned int instance;
        char *buf;
        struct TCP_Server_Info *server;

        optype = flags & CIFS_OP_MASK;

        for (i = 0; i < num_rqst; i++)
                resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

        if ((ses == NULL) || (ses->server == NULL)) {
                cifs_dbg(VFS, "Null session\n");
                return -EIO;
        }

        if (!ses->binding) {
                uint index = 0;

                if (ses->chan_count > 1) {
                        index = (uint)atomic_inc_return(&ses->chan_seq);
                        index %= ses->chan_count;
                }
                server = ses->chans[index].server;
        } else {
                server = cifs_ses_server(ses);
        }

        if (server->tcpStatus == CifsExiting)
                return -ENOENT;

        /*
         * Wait for all the requests to become available.
         * This approach still leaves the possibility of being stuck waiting
         * for credits if the server doesn't grant credits to the outstanding
         * requests and the client is completely idle, not generating any
         * other requests.
         * This can be handled by the eventual session reconnect.
         */
        rc = wait_for_compound_request(server, num_rqst, flags,
                                       &instance);
        if (rc)
                return rc;

        for (i = 0; i < num_rqst; i++) {
                credits[i].value = 1;
                credits[i].instance = instance;
        }

        /*
         * Make sure that we sign in the same order that we send on this socket
         * and avoid races inside tcp sendmsg code that could cause corruption
         * of smb data.
         */

        mutex_lock(&server->srv_mutex);

        /*
         * All the parts of the compound chain must use credits obtained from
         * the same session. We can not use credits obtained from the previous
         * session to send this request. Check if there were reconnects after
         * we obtained credits and return -EAGAIN in such cases to let callers
         * handle it.
         */
        if (instance != server->reconnect_instance) {
                mutex_unlock(&server->srv_mutex);
                for (j = 0; j < num_rqst; j++)
                        add_credits(server, &credits[j], optype);
                return -EAGAIN;
        }

        for (i = 0; i < num_rqst; i++) {
                midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
                if (IS_ERR(midQ[i])) {
                        revert_current_mid(server, i);
                        for (j = 0; j < i; j++)
                                cifs_delete_mid(midQ[j]);
                        mutex_unlock(&server->srv_mutex);

                        /* Update # of requests on wire to server */
                        for (j = 0; j < num_rqst; j++)
                                add_credits(server, &credits[j], optype);
                        return PTR_ERR(midQ[i]);
                }

                midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
                midQ[i]->optype = optype;
                /*
                 * Invoke callback for every part of the compound chain
                 * to calculate credits properly. Wake up this thread only when
                 * the last element is received.
                 */
                if (i < num_rqst - 1)
                        midQ[i]->callback = cifs_compound_callback;
                else
                        midQ[i]->callback = cifs_compound_last_callback;
        }
        cifs_in_send_inc(server);
        rc = smb_send_rqst(server, num_rqst, rqst, flags);
        cifs_in_send_dec(server);

        for (i = 0; i < num_rqst; i++)
                cifs_save_when_sent(midQ[i]);

        if (rc < 0) {
                revert_current_mid(server, num_rqst);
                server->sequence_number -= 2;
        }

        mutex_unlock(&server->srv_mutex);

        /*
         * If sending failed for some reason, or if this is an oplock break
         * that we will not receive a response to, return the credits back.
         */
        if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
                for (i = 0; i < num_rqst; i++)
                        add_credits(server, &credits[i], optype);
                goto out;
        }

        /*
         * At this point the request is passed to the network stack - we assume
         * that any credits taken from the server structure on the client have
         * been spent and we can't return them back. Once we receive responses
         * we will collect credits granted by the server in the mid callbacks
         * and add those credits to the server structure.
         */

        /*
         * Compounding is never used during session establish.
         */
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
                smb311_update_preauth_hash(ses, rqst[0].rq_iov,
                                           rqst[0].rq_nvec);

        for (i = 0; i < num_rqst; i++) {
                rc = wait_for_response(server, midQ[i]);
                if (rc != 0)
                        break;
        }
        if (rc != 0) {
                for (; i < num_rqst; i++) {
                        cifs_server_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
                                 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
                        send_cancel(server, &rqst[i], midQ[i]);
                        spin_lock(&GlobalMid_Lock);
                        midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
                        if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
                                midQ[i]->callback = cifs_cancelled_callback;
                                cancelled_mid[i] = true;
                                credits[i].value = 0;
                        }
                        spin_unlock(&GlobalMid_Lock);
                }
        }

        for (i = 0; i < num_rqst; i++) {
                if (rc < 0)
                        goto out;

                rc = cifs_sync_mid_result(midQ[i], server);
                if (rc != 0) {
                        /* mark this mid as cancelled to not free it below */
                        cancelled_mid[i] = true;
                        goto out;
                }

                if (!midQ[i]->resp_buf ||
                    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
                        rc = -EIO;
                        cifs_dbg(FYI, "Bad MID state?\n");
                        goto out;
                }

                buf = (char *)midQ[i]->resp_buf;
                resp_iov[i].iov_base = buf;
                resp_iov[i].iov_len = midQ[i]->resp_buf_size +
                        server->vals->header_preamble_size;

                if (midQ[i]->large_buf)
                        resp_buf_type[i] = CIFS_LARGE_BUFFER;
                else
                        resp_buf_type[i] = CIFS_SMALL_BUFFER;

                rc = server->ops->check_receive(midQ[i], server,
                                                     flags & CIFS_LOG_ERROR);

                /* mark it so buf will not be freed by cifs_delete_mid */
                if ((flags & CIFS_NO_RSP_BUF) == 0)
                        midQ[i]->resp_buf = NULL;

        }

        /*
         * Compounding is never used during session establish.
         */
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
                struct kvec iov = {
                        .iov_base = resp_iov[0].iov_base,
                        .iov_len = resp_iov[0].iov_len
                };
                smb311_update_preauth_hash(ses, &iov, 1);
        }

out:
        /*
         * This will dequeue all mids. After this it is important that the
         * demultiplex_thread will not process any of these mids any further.
         * This is prevented above by using a noop callback that will not
         * wake this thread except for the very last PDU.
         */
        for (i = 0; i < num_rqst; i++) {
                if (!cancelled_mid[i])
                        cifs_delete_mid(midQ[i]);
        }

        return rc;
}
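/*
 * Worked example of the channel round-robin at the top of
 * compound_send_recv() for a session with chan_count == 3 and a current
 * chan_seq of 7 (hypothetical numbers):
 *
 *      index = (uint)atomic_inc_return(&ses->chan_seq);        // 8
 *      index %= ses->chan_count;                               // 8 % 3 == 2
 *
 * so the request goes out on ses->chans[2].server, spreading load across
 * the multichannel connections, while a binding session always uses
 * cifs_ses_server(ses).
 */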

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
               struct smb_rqst *rqst, int *resp_buf_type, const int flags,
               struct kvec *resp_iov)
{
        return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
                                  resp_iov);
}

int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
             struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
             const int flags, struct kvec *resp_iov)
{
        struct smb_rqst rqst;
        struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
        int rc;

        if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
                new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
                                        GFP_KERNEL);
                if (!new_iov) {
                        /* otherwise cifs_send_recv below sets resp_buf_type */
                        *resp_buf_type = CIFS_NO_BUFFER;
                        return -ENOMEM;
                }
        } else
                new_iov = s_iov;

        /* 1st iov is a RFC1001 length followed by the rest of the packet */
        memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

        new_iov[0].iov_base = new_iov[1].iov_base;
        new_iov[0].iov_len = 4;
        new_iov[1].iov_base += 4;
        new_iov[1].iov_len -= 4;

        memset(&rqst, 0, sizeof(struct smb_rqst));
        rqst.rq_iov = new_iov;
        rqst.rq_nvec = n_vec + 1;

        rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
        if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
                kfree(new_iov);
        return rc;
}
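/*
 * Layout sketch for the iov rewrite above: a caller passes kvecs covering
 * a complete SMB frame whose first vector starts with the 4-byte RFC 1001
 * length field. SendReceive2() shifts things so that compound_send_recv()
 * sees the length field and the body as separate vectors:
 *
 *      new_iov[0].iov_base = frame;            new_iov[0].iov_len = 4;
 *      new_iov[1].iov_base = frame + 4;        new_iov[1].iov_len -= 4;
 *
 * matching the invariant checked in cifs_setup_request(), where
 * rq_iov[0].iov_len must be 4 and rq_iov[1] must start 4 bytes later.
 */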

int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
            struct smb_hdr *in_buf, struct smb_hdr *out_buf,
            int *pbytes_returned, const int flags)
{
        int rc = 0;
        struct mid_q_entry *midQ;
        unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
        struct kvec iov = { .iov_base = in_buf, .iov_len = len };
        struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
        struct cifs_credits credits = { .value = 1, .instance = 0 };
        struct TCP_Server_Info *server;

        if (ses == NULL) {
                cifs_dbg(VFS, "Null smb session\n");
                return -EIO;
        }
        server = ses->server;
        if (server == NULL) {
                cifs_dbg(VFS, "Null tcp session\n");
                return -EIO;
        }

        if (server->tcpStatus == CifsExiting)
                return -ENOENT;

        /* Ensure that we do not send more than 50 overlapping requests
           to the same server. We may make this configurable later or
           use ses->maxReq */

        if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
                cifs_server_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
                         len);
                return -EIO;
        }

        rc = wait_for_free_request(server, flags, &credits.instance);
        if (rc)
                return rc;

        /* make sure that we sign in the same order that we send on this socket
           and avoid races inside tcp sendmsg code that could cause corruption
           of smb data */

        mutex_lock(&server->srv_mutex);

        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
                mutex_unlock(&server->srv_mutex);
                /* Update # of requests on wire to server */
                add_credits(server, &credits, 0);
                return rc;
        }

        rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
        if (rc) {
                mutex_unlock(&server->srv_mutex);
                goto out;
        }

        midQ->mid_state = MID_REQUEST_SUBMITTED;

        cifs_in_send_inc(server);
        rc = smb_send(server, in_buf, len);
        cifs_in_send_dec(server);
        cifs_save_when_sent(midQ);

        if (rc < 0)
                server->sequence_number -= 2;

        mutex_unlock(&server->srv_mutex);

        if (rc < 0)
                goto out;

        rc = wait_for_response(server, midQ);
        if (rc != 0) {
                send_cancel(server, &rqst, midQ);
                spin_lock(&GlobalMid_Lock);
                if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
                        /* no longer considered to be "in-flight" */
                        midQ->callback = DeleteMidQEntry;
                        spin_unlock(&GlobalMid_Lock);
                        add_credits(server, &credits, 0);
                        return rc;
                }
                spin_unlock(&GlobalMid_Lock);
        }

        rc = cifs_sync_mid_result(midQ, server);
        if (rc != 0) {
                add_credits(server, &credits, 0);
                return rc;
        }

        if (!midQ->resp_buf || !out_buf ||
            midQ->mid_state != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cifs_server_dbg(VFS, "Bad MID state?\n");
                goto out;
        }

        *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
        memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
        rc = cifs_check_receive(midQ, server, 0);
out:
        cifs_delete_mid(midQ);
        add_credits(server, &credits, 0);

        return rc;
}

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
                        struct smb_hdr *in_buf,
                        struct smb_hdr *out_buf)
{
        int bytes_returned;
        struct cifs_ses *ses = tcon->ses;
        LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

        /* We just modify the current in_buf to change
           the type of lock from LOCKING_ANDX_SHARED_LOCK
           or LOCKING_ANDX_EXCLUSIVE_LOCK to
           LOCKING_ANDX_CANCEL_LOCK. */

        pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
        pSMB->Timeout = 0;
        pSMB->hdr.Mid = get_next_mid(ses->server);

        return SendReceive(xid, ses, in_buf, out_buf,
                        &bytes_returned, 0);
}

int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
            struct smb_hdr *in_buf, struct smb_hdr *out_buf,
            int *pbytes_returned)
{
        int rc = 0;
        int rstart = 0;
        struct mid_q_entry *midQ;
        struct cifs_ses *ses;
        unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
        struct kvec iov = { .iov_base = in_buf, .iov_len = len };
        struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
        unsigned int instance;
        struct TCP_Server_Info *server;

        if (tcon == NULL || tcon->ses == NULL) {
                cifs_dbg(VFS, "Null smb session\n");
                return -EIO;
        }
        ses = tcon->ses;
        server = ses->server;

        if (server == NULL) {
                cifs_dbg(VFS, "Null tcp session\n");
                return -EIO;
        }

        if (server->tcpStatus == CifsExiting)
                return -ENOENT;

        /* Ensure that we do not send more than 50 overlapping requests
           to the same server. We may make this configurable later or
           use ses->maxReq */

        if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
                cifs_tcon_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
                         len);
                return -EIO;
        }

        rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
        if (rc)
                return rc;

        /* make sure that we sign in the same order that we send on this socket
           and avoid races inside tcp sendmsg code that could cause corruption
           of smb data */

        mutex_lock(&server->srv_mutex);

        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
                mutex_unlock(&server->srv_mutex);
                return rc;
        }

        rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
        if (rc) {
                cifs_delete_mid(midQ);
                mutex_unlock(&server->srv_mutex);
                return rc;
        }

        midQ->mid_state = MID_REQUEST_SUBMITTED;
        cifs_in_send_inc(server);
        rc = smb_send(server, in_buf, len);
        cifs_in_send_dec(server);
        cifs_save_when_sent(midQ);

        if (rc < 0)
                server->sequence_number -= 2;

        mutex_unlock(&server->srv_mutex);

        if (rc < 0) {
                cifs_delete_mid(midQ);
                return rc;
        }

        /* Wait for a reply - allow signals to interrupt. */
        rc = wait_event_interruptible(server->response_q,
                (!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
                ((server->tcpStatus != CifsGood) &&
                 (server->tcpStatus != CifsNew)));

        /* Were we interrupted by a signal ? */
        if ((rc == -ERESTARTSYS) &&
                (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
                ((server->tcpStatus == CifsGood) ||
                 (server->tcpStatus == CifsNew))) {

                if (in_buf->Command == SMB_COM_TRANSACTION2) {
                        /* POSIX lock. We send an NT_CANCEL SMB to cause the
                           blocking lock to return. */
                        rc = send_cancel(server, &rqst, midQ);
                        if (rc) {
                                cifs_delete_mid(midQ);
                                return rc;
                        }
                } else {
                        /* Windows lock. We send a LOCKINGX_CANCEL_LOCK
                           to cause the blocking lock to return. */

                        rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

                        /* If we get -ENOLCK back, the lock may have
                           already been removed. Don't exit in this case. */
                        if (rc && rc != -ENOLCK) {
                                cifs_delete_mid(midQ);
                                return rc;
                        }
                }

                rc = wait_for_response(server, midQ);
                if (rc) {
                        send_cancel(server, &rqst, midQ);
                        spin_lock(&GlobalMid_Lock);
                        if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
                                /* no longer considered to be "in-flight" */
                                midQ->callback = DeleteMidQEntry;
                                spin_unlock(&GlobalMid_Lock);
                                return rc;
                        }
                        spin_unlock(&GlobalMid_Lock);
                }

                /* We got the response - restart system call. */
                rstart = 1;
        }

        rc = cifs_sync_mid_result(midQ, server);
        if (rc != 0)
                return rc;

        /* rcvd frame is ok */
        if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cifs_tcon_dbg(VFS, "Bad MID state?\n");
                goto out;
        }

        *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
        memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
        rc = cifs_check_receive(midQ, server, 0);
out:
        cifs_delete_mid(midQ);
        if (rstart && rc == -EACCES)
                return -ERESTARTSYS;
        return rc;
}