/*
 *   fs/cifs/transport.c
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

void
cifs_wake_up_task(struct mid_q_entry *mid)
{
        wake_up_process(mid->callback_data);
}

struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
        struct mid_q_entry *temp;

        if (server == NULL) {
                cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
                return NULL;
        }

        temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
        memset(temp, 0, sizeof(struct mid_q_entry));
        kref_init(&temp->refcount);
        temp->mid = get_mid(smb_buffer);
        temp->pid = current->pid;
        temp->command = cpu_to_le16(smb_buffer->Command);
        cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
        /* easier to use jiffies; note a mid is allocated before it is sent */
        temp->when_alloc = jiffies;
        temp->server = server;

        /*
         * The default is for the mid to be synchronous, so the
         * default callback just wakes up the current task.
         */
        get_task_struct(current);
        temp->creator = current;
        temp->callback = cifs_wake_up_task;
        temp->callback_data = current;

        atomic_inc(&midCount);
        temp->mid_state = MID_REQUEST_ALLOCATED;
        return temp;
}
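
/*
 * Rough lifecycle sketch (informational, inferred from this file): a mid
 * starts in MID_REQUEST_ALLOCATED with a kref of 1, moves to
 * MID_REQUEST_SUBMITTED once queued on pending_mid_q, and is torn down
 * via cifs_mid_q_entry_release() -> _cifs_mid_q_entry_release(), which
 * records response-time statistics and returns it to cifs_mid_poolp.
 */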

static void _cifs_mid_q_entry_release(struct kref *refcount)
{
        struct mid_q_entry *midEntry =
                        container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
        __le16 command = midEntry->server->vals->lock_cmd;
        __u16 smb_cmd = le16_to_cpu(midEntry->command);
        unsigned long now;
        unsigned long roundtrip_time;
#endif
        struct TCP_Server_Info *server = midEntry->server;

        if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
            midEntry->mid_state == MID_RESPONSE_RECEIVED &&
            server->ops->handle_cancelled_mid)
                server->ops->handle_cancelled_mid(midEntry->resp_buf, server);

        midEntry->mid_state = MID_FREE;
        atomic_dec(&midCount);
        if (midEntry->large_buf)
                cifs_buf_release(midEntry->resp_buf);
        else
                cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
        now = jiffies;
        if (time_before(now, midEntry->when_alloc))
                cifs_server_dbg(VFS, "Invalid mid allocation time\n");
        roundtrip_time = now - midEntry->when_alloc;

        if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
                if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
                        server->slowest_cmd[smb_cmd] = roundtrip_time;
                        server->fastest_cmd[smb_cmd] = roundtrip_time;
                } else {
                        if (server->slowest_cmd[smb_cmd] < roundtrip_time)
                                server->slowest_cmd[smb_cmd] = roundtrip_time;
                        else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
                                server->fastest_cmd[smb_cmd] = roundtrip_time;
                }
                cifs_stats_inc(&server->num_cmds[smb_cmd]);
                server->time_per_cmd[smb_cmd] += roundtrip_time;
        }
        /*
         * Commands taking longer than one second (default) can be indications
         * that something is wrong, unless it is quite a slow link or a very
         * busy server. Note that this calculation is unlikely or impossible to
         * wrap as long as slow_rsp_threshold is not set way above the
         * recommended maximum value (32767, i.e. 9 hours), and is generally
         * harmless even if wrong since it only affects debug counters - so
         * leave the calculation as a simple comparison rather than doing
         * multiple conversions and overflow checks.
         */
        if ((slow_rsp_threshold != 0) &&
            time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
            (midEntry->command != command)) {
                /*
                 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
                 * NB: le16_to_cpu returns unsigned so it cannot be negative
                 * below
                 */
                if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
                        cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

                trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
                               midEntry->when_sent, midEntry->when_received);
                if (cifsFYI & CIFS_TIMER) {
                        pr_debug("slow rsp: cmd %d mid %llu\n",
                                 smb_cmd, midEntry->mid);
                        cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
                                  now - midEntry->when_alloc,
                                  now - midEntry->when_sent,
                                  now - midEntry->when_received);
                }
        }
#endif
        put_task_struct(midEntry->creator);

        mempool_free(midEntry, cifs_mid_poolp);
}

void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
        spin_lock(&GlobalMid_Lock);
        kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
        spin_unlock(&GlobalMid_Lock);
}

void DeleteMidQEntry(struct mid_q_entry *midEntry)
{
        cifs_mid_q_entry_release(midEntry);
}

void
cifs_delete_mid(struct mid_q_entry *mid)
{
        spin_lock(&GlobalMid_Lock);
        if (!(mid->mid_flags & MID_DELETED)) {
                list_del_init(&mid->qhead);
                mid->mid_flags |= MID_DELETED;
        }
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:     Server to send the data to
 * @smb_msg:    Message to send
 * @sent:       amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
              size_t *sent)
{
        int rc = 0;
        int retries = 0;
        struct socket *ssocket = server->ssocket;

        *sent = 0;

        smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
        smb_msg->msg_namelen = sizeof(struct sockaddr);
        smb_msg->msg_control = NULL;
        smb_msg->msg_controllen = 0;
        if (server->noblocksnd)
                smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
        else
                smb_msg->msg_flags = MSG_NOSIGNAL;

        while (msg_data_left(smb_msg)) {
                /*
                 * If this is a blocking send, we try 3 times, since each can
                 * block for 5 seconds. For a nonblocking send we have to try
                 * more times, but wait increasing amounts of time to allow
                 * the socket to clear. The overall time we wait in either
                 * case to send on the socket is about 15 seconds.
                 * Similarly we wait for 15 seconds for a response from
                 * the server in SendReceive[2] for the server to send
                 * a response back for most types of requests (except
                 * SMB writes past the end of file, which can be slow, and
                 * blocking lock operations). NFS waits slightly longer
                 * than CIFS, but this can make it take longer for
                 * nonresponsive servers to be detected, and 15 seconds
                 * is more than enough time for modern networks to
                 * send a packet. In most cases, if we fail to send
                 * after the retries we will kill the socket and
                 * reconnect, which may clear the network problem.
                 */
                rc = sock_sendmsg(ssocket, smb_msg);
                if (rc == -EAGAIN) {
                        retries++;
                        if (retries >= 14 ||
                            (!server->noblocksnd && (retries > 2))) {
                                cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
                                         ssocket);
                                return -EAGAIN;
                        }
                        msleep(1 << retries);
                        continue;
                }

                if (rc < 0)
                        return rc;

                if (rc == 0) {
                        /*
                         * should never happen, letting the socket clear before
                         * retrying is our only obvious option here
                         */
                        cifs_server_dbg(VFS, "tcp sent no data\n");
                        msleep(500);
                        continue;
                }

                /* send was at least partially successful */
                *sent += rc;
                retries = 0; /* in case we get ENOSPC on the next send */
        }
        return 0;
}
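
/*
 * Informational note (derived from the retry loop above): for a
 * nonblocking socket, msleep(1 << retries) backs off exponentially,
 * sleeping 2 + 4 + ... + 8192 ms across retries 1..13 (about 16 seconds
 * in total) before the retries >= 14 check gives up - which is where the
 * "stuck for 15 seconds" message above comes from.
 */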

unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        unsigned int i;
        struct kvec *iov;
        int nvec;
        unsigned long buflen = 0;

        if (server->vals->header_preamble_size == 0 &&
            rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
                iov = &rqst->rq_iov[1];
                nvec = rqst->rq_nvec - 1;
        } else {
                iov = rqst->rq_iov;
                nvec = rqst->rq_nvec;
        }

        /* total up iov array first */
        for (i = 0; i < nvec; i++)
                buflen += iov[i].iov_len;

        /*
         * Add in the page array if there is one. The caller needs to make
         * sure rq_offset and rq_tailsz are set correctly. If a buffer of
         * multiple pages ends at page boundary, rq_tailsz needs to be set to
         * PAGE_SIZE.
         */
        if (rqst->rq_npages) {
                if (rqst->rq_npages == 1)
                        buflen += rqst->rq_tailsz;
                else {
                        /*
                         * If there is more than one page, calculate the
                         * buffer length based on rq_offset and rq_tailsz
                         */
                        buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
                                        rqst->rq_offset;
                        buflen += rqst->rq_tailsz;
                }
        }

        return buflen;
}
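
/*
 * Worked example (illustrative values, not from the original source):
 * with rq_npages = 3, rq_pagesz = 4096, rq_offset = 512 and
 * rq_tailsz = 100, the page-array contribution is
 * 4096 * 2 - 512 + 100 = 7780 bytes: the first page carries 4096 - 512,
 * the middle page a full 4096, and the partial tail page the final 100.
 */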

static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
                struct smb_rqst *rqst)
{
        int rc = 0;
        struct kvec *iov;
        int n_vec;
        unsigned int send_length = 0;
        unsigned int i, j;
        sigset_t mask, oldmask;
        size_t total_len = 0, sent, size;
        struct socket *ssocket = server->ssocket;
        struct msghdr smb_msg;
        __be32 rfc1002_marker;

        if (cifs_rdma_enabled(server)) {
                /* return -EAGAIN when connecting or reconnecting */
                rc = -EAGAIN;
                if (server->smbd_conn)
                        rc = smbd_send(server, num_rqst, rqst);
                goto smbd_done;
        }

        if (ssocket == NULL)
                return -EAGAIN;

        if (fatal_signal_pending(current)) {
                cifs_dbg(FYI, "signal pending before send request\n");
                return -ERESTARTSYS;
        }

        /* cork the socket */
        tcp_sock_set_cork(ssocket->sk, true);

        for (j = 0; j < num_rqst; j++)
                send_length += smb_rqst_len(server, &rqst[j]);
        rfc1002_marker = cpu_to_be32(send_length);

        /*
         * We should not allow signals to interrupt the network send because
         * any partial send will cause session reconnects thus increasing
         * latency of system calls and overload a server with unnecessary
         * requests.
         */

        sigfillset(&mask);
        sigprocmask(SIG_BLOCK, &mask, &oldmask);

        /* Generate a rfc1002 marker for SMB2+ */
        if (server->vals->header_preamble_size == 0) {
                struct kvec hiov = {
                        .iov_base = &rfc1002_marker,
                        .iov_len  = 4
                };
                iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;
                send_length += 4;
        }
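
        /*
         * Illustrative wire layout (not from the original source): for a
         * 100-byte SMB2 request the bytes on the socket are
         *
         *   00 00 00 64 | fe 53 4d 42 ...
         *   \- marker -/  \- SMB2 header and body -/
         *
         * i.e. a 4-byte big-endian RFC 1002-style length field followed
         * by the PDU(s) counted by send_length.
         */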

        cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

        for (j = 0; j < num_rqst; j++) {
                iov = rqst[j].rq_iov;
                n_vec = rqst[j].rq_nvec;

                size = 0;
                for (i = 0; i < n_vec; i++) {
                        dump_smb(iov[i].iov_base, iov[i].iov_len);
                        size += iov[i].iov_len;
                }

                iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;

                /* now walk the page array and send each page in it */
                for (i = 0; i < rqst[j].rq_npages; i++) {
                        struct bio_vec bvec;

                        bvec.bv_page = rqst[j].rq_pages[i];
                        rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
                                             &bvec.bv_offset);

                        iov_iter_bvec(&smb_msg.msg_iter, WRITE,
                                      &bvec, 1, bvec.bv_len);
                        rc = smb_send_kvec(server, &smb_msg, &sent);
                        if (rc < 0)
                                break;

                        total_len += sent;
                }
        }

unmask:
        sigprocmask(SIG_SETMASK, &oldmask, NULL);

        /*
         * If a signal is pending but we have already sent the whole packet to
         * the server, we need to return success status to allow a
         * corresponding mid entry to be kept in the pending requests queue,
         * thus allowing the client to handle responses from the server.
         *
         * If only part of the packet has been sent, there is no need to hide
         * the interrupt because the session will be reconnected anyway, so
         * there won't be any response from the server to handle.
         */

        if (signal_pending(current) && (total_len != send_length)) {
                cifs_dbg(FYI, "signal is pending after attempt to send\n");
                rc = -ERESTARTSYS;
        }

        /* uncork it */
        tcp_sock_set_cork(ssocket->sk, false);

        if ((total_len > 0) && (total_len != send_length)) {
                cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
                         send_length, total_len);
                /*
                 * If we have only sent part of an SMB then the next SMB could
                 * be taken as the remainder of this one. We need to kill the
                 * socket so the server throws away the partial SMB
                 */
                server->tcpStatus = CifsNeedReconnect;
                trace_smb3_partial_send_reconnect(server->CurrentMid,
                                                  server->hostname);
        }
smbd_done:
        if (rc < 0 && rc != -EINTR)
                cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
                         rc);
        else if (rc > 0)
                rc = 0;

        return rc;
}

static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
              struct smb_rqst *rqst, int flags)
{
        struct kvec iov;
        struct smb2_transform_hdr *tr_hdr;
        struct smb_rqst cur_rqst[MAX_COMPOUND];
        int rc;

        if (!(flags & CIFS_TRANSFORM_REQ))
                return __smb_send_rqst(server, num_rqst, rqst);

        if (num_rqst > MAX_COMPOUND - 1)
                return -ENOMEM;

        if (!server->ops->init_transform_rq) {
                cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
                return -EIO;
        }

        tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS);
        if (!tr_hdr)
                return -ENOMEM;

        memset(&cur_rqst[0], 0, sizeof(cur_rqst));
        memset(&iov, 0, sizeof(iov));
        memset(tr_hdr, 0, sizeof(*tr_hdr));

        iov.iov_base = tr_hdr;
        iov.iov_len = sizeof(*tr_hdr);
        cur_rqst[0].rq_iov = &iov;
        cur_rqst[0].rq_nvec = 1;

        rc = server->ops->init_transform_rq(server, num_rqst + 1,
                                            &cur_rqst[0], rqst);
        if (rc)
                goto out;

        rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
        smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
out:
        kfree(tr_hdr);
        return rc;
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
         unsigned int smb_buf_length)
{
        struct kvec iov[2];
        struct smb_rqst rqst = { .rq_iov = iov,
                                 .rq_nvec = 2 };

        iov[0].iov_base = smb_buffer;
        iov[0].iov_len = 4;
        iov[1].iov_base = (char *)smb_buffer + 4;
        iov[1].iov_len = smb_buf_length;

        return __smb_send_rqst(server, 1, &rqst);
}

static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
                      const int timeout, const int flags,
                      unsigned int *instance)
{
        long rc;
        int *credits;
        int optype;
        long int t;
        int scredits = server->credits;

        if (timeout < 0)
                t = MAX_JIFFY_OFFSET;
        else
                t = msecs_to_jiffies(timeout);

        optype = flags & CIFS_OP_MASK;

        *instance = 0;

        credits = server->ops->get_credits_field(server, optype);
        /* Since an echo is already inflight, no need to wait to send another */
        if (*credits <= 0 && optype == CIFS_ECHO_OP)
                return -EAGAIN;

        spin_lock(&server->req_lock);
        if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
                /* oplock breaks must not be held up */
                server->in_flight++;
                if (server->in_flight > server->max_in_flight)
                        server->max_in_flight = server->in_flight;
                *credits -= 1;
                *instance = server->reconnect_instance;
                spin_unlock(&server->req_lock);
                return 0;
        }

        while (1) {
                if (*credits < num_credits) {
                        spin_unlock(&server->req_lock);
                        cifs_num_waiters_inc(server);
                        rc = wait_event_killable_timeout(server->request_q,
                                has_credits(server, credits, num_credits), t);
                        cifs_num_waiters_dec(server);
                        if (!rc) {
                                trace_smb3_credit_timeout(server->CurrentMid,
                                        server->hostname, num_credits, 0);
                                cifs_server_dbg(VFS, "wait timed out after %d ms\n",
                                         timeout);
                                return -ENOTSUPP;
                        }
                        if (rc == -ERESTARTSYS)
                                return -ERESTARTSYS;
                        spin_lock(&server->req_lock);
                } else {
                        if (server->tcpStatus == CifsExiting) {
                                spin_unlock(&server->req_lock);
                                return -ENOENT;
                        }

                        /*
                         * For normal commands, reserve the last MAX_COMPOUND
                         * credits for compound requests.
                         * Otherwise these compounds could be permanently
                         * starved for credits by single-credit requests.
                         *
                         * To prevent spinning the CPU, block this thread until
                         * there are >MAX_COMPOUND credits available.
                         * But only do this if we already have a lot of
                         * credits in flight to avoid triggering this check
                         * for servers that are slow to hand out credits on
                         * new sessions.
                         */
                        if (!optype && num_credits == 1 &&
                            server->in_flight > 2 * MAX_COMPOUND &&
                            *credits <= MAX_COMPOUND) {
                                spin_unlock(&server->req_lock);
                                cifs_num_waiters_inc(server);
                                rc = wait_event_killable_timeout(
                                        server->request_q,
                                        has_credits(server, credits,
                                                    MAX_COMPOUND + 1),
                                        t);
                                cifs_num_waiters_dec(server);
                                if (!rc) {
                                        trace_smb3_credit_timeout(
                                                server->CurrentMid,
                                                server->hostname, num_credits,
                                                0);
                                        cifs_server_dbg(VFS, "wait timed out after %d ms\n",
                                                 timeout);
                                        return -ENOTSUPP;
                                }
                                if (rc == -ERESTARTSYS)
                                        return -ERESTARTSYS;
                                spin_lock(&server->req_lock);
                                continue;
                        }

                        /*
                         * Can not count locking commands against the total
                         * as they are allowed to block on the server.
                         */

                        /* update # of requests on the wire to server */
                        if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
                                *credits -= num_credits;
                                scredits = *credits;
                                server->in_flight += num_credits;
                                if (server->in_flight > server->max_in_flight)
                                        server->max_in_flight = server->in_flight;
                                *instance = server->reconnect_instance;
                        }
                        spin_unlock(&server->req_lock);

                        trace_smb3_add_credits(server->CurrentMid,
                                        server->hostname, scredits, -(num_credits));
                        cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
                                        __func__, num_credits, scredits);
                        break;
                }
        }
        return 0;
}
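
/*
 * Illustrative numbers (an assumption for the example; MAX_COMPOUND is
 * defined in cifsglob.h): if MAX_COMPOUND were 5, a single-credit request
 * that finds more than 10 requests in flight and 5 or fewer credits left
 * blocks until at least 6 credits are available, so a later 5-op compound
 * chain is not starved by a stream of 1-credit requests.
 */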

static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
                      unsigned int *instance)
{
        return wait_for_free_credits(server, 1, -1, flags,
                                     instance);
}

static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
                          const int flags, unsigned int *instance)
{
        int *credits;
        int scredits, sin_flight;

        credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

        spin_lock(&server->req_lock);
        scredits = *credits;
        sin_flight = server->in_flight;

        if (*credits < num) {
                /*
                 * If the server is tight on resources or just gives us fewer
                 * credits for other reasons (e.g. requests are coming out of
                 * order and the server delays granting more credits until it
                 * processes a missing mid) and we exhausted most available
                 * credits there may be situations when we try to send
                 * a compound request but we don't have enough credits. At this
                 * point the client needs to decide if it should wait for
                 * additional credits or fail the request. If at least one
                 * request is in flight there is a high probability that the
                 * server will return enough credits to satisfy this compound
                 * request.
                 *
                 * Return immediately if no requests are in flight since we
                 * would otherwise be stuck waiting for credits.
                 */
                if (server->in_flight == 0) {
                        spin_unlock(&server->req_lock);
                        trace_smb3_insufficient_credits(server->CurrentMid,
                                        server->hostname, scredits, sin_flight);
                        cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
                                        __func__, sin_flight, num, scredits);
                        return -ENOTSUPP;
                }
        }
        spin_unlock(&server->req_lock);

        return wait_for_free_credits(server, num, 60000, flags,
                                     instance);
}

int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
                      unsigned int *num, struct cifs_credits *credits)
{
        *num = size;
        credits->value = 0;
        credits->instance = server->reconnect_instance;
        return 0;
}

static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
                        struct mid_q_entry **ppmidQ)
{
        if (ses->server->tcpStatus == CifsExiting)
                return -ENOENT;

        if (ses->server->tcpStatus == CifsNeedReconnect) {
                cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
                return -EAGAIN;
        }

        if (ses->status == CifsNew) {
                if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
                        (in_buf->Command != SMB_COM_NEGOTIATE))
                        return -EAGAIN;
                /* else ok - we are setting up session */
        }

        if (ses->status == CifsExiting) {
                /* check if SMB session is bad because we are setting it up */
                if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
                        return -EAGAIN;
                /* else ok - we are shutting down session */
        }

        *ppmidQ = AllocMidQEntry(in_buf, ses->server);
        if (*ppmidQ == NULL)
                return -ENOMEM;
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);
        return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
        int error;

        error = wait_event_freezekillable_unsafe(server->response_q,
                                    midQ->mid_state != MID_REQUEST_SUBMITTED);
        if (error < 0)
                return -ERESTARTSYS;

        return 0;
}

struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        /* enable signing if server requires it */
        if (server->sign)
                hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

        mid = AllocMidQEntry(hdr, server);
        if (mid == NULL)
                return ERR_PTR(-ENOMEM);

        rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
        if (rc) {
                DeleteMidQEntry(mid);
                return ERR_PTR(rc);
        }

        return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
                mid_receive_t *receive, mid_callback_t *callback,
                mid_handle_t *handle, void *cbdata, const int flags,
                const struct cifs_credits *exist_credits)
{
        int rc;
        struct mid_q_entry *mid;
        struct cifs_credits credits = { .value = 0, .instance = 0 };
        unsigned int instance;
        int optype;

        optype = flags & CIFS_OP_MASK;

        if ((flags & CIFS_HAS_CREDITS) == 0) {
                rc = wait_for_free_request(server, flags, &instance);
                if (rc)
                        return rc;
                credits.value = 1;
                credits.instance = instance;
        } else
                instance = exist_credits->instance;

        mutex_lock(&server->srv_mutex);

        /*
         * We can't use credits obtained from the previous session to send this
         * request. Check if there were reconnects after we obtained credits and
         * return -EAGAIN in such cases to let callers handle it.
         */
        if (instance != server->reconnect_instance) {
                mutex_unlock(&server->srv_mutex);
                add_credits_and_wake_if(server, &credits, optype);
                return -EAGAIN;
        }

        mid = server->ops->setup_async_request(server, rqst);
        if (IS_ERR(mid)) {
                mutex_unlock(&server->srv_mutex);
                add_credits_and_wake_if(server, &credits, optype);
                return PTR_ERR(mid);
        }

        mid->receive = receive;
        mid->callback = callback;
        mid->callback_data = cbdata;
        mid->handle = handle;
        mid->mid_state = MID_REQUEST_SUBMITTED;

        /* put it on the pending_mid_q */
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&mid->qhead, &server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);

        /*
         * Need to store the time in mid before calling I/O. For call_async,
         * the I/O response may come back and free the mid entry on another
         * thread.
         */
        cifs_save_when_sent(mid);
        cifs_in_send_inc(server);
        rc = smb_send_rqst(server, 1, rqst, flags);
        cifs_in_send_dec(server);

        if (rc < 0) {
                revert_current_mid(server, mid->credits);
                server->sequence_number -= 2;
                cifs_delete_mid(mid);
        }

        mutex_unlock(&server->srv_mutex);

        if (rc == 0)
                return 0;

        add_credits_and_wake_if(server, &credits, optype);
        return rc;
}

/*
 * Send an SMB request. No response info (other than the return code) needs
 * to be parsed.
 *
 * flags indicate the type of request buffer, how long to wait, and whether
 * to log the NT STATUS code (error) before mapping it to a POSIX error.
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
                 char *in_buf, int flags)
{
        int rc;
        struct kvec iov[1];
        struct kvec rsp_iov;
        int resp_buf_type;

        iov[0].iov_base = in_buf;
        iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
        flags |= CIFS_NO_RSP_BUF;
        rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
        cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

        return rc;
}

static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
        int rc = 0;

        cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
                 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

        spin_lock(&GlobalMid_Lock);
        switch (mid->mid_state) {
        case MID_RESPONSE_RECEIVED:
                spin_unlock(&GlobalMid_Lock);
                return rc;
        case MID_RETRY_NEEDED:
                rc = -EAGAIN;
                break;
        case MID_RESPONSE_MALFORMED:
                rc = -EIO;
                break;
        case MID_SHUTDOWN:
                rc = -EHOSTDOWN;
                break;
        default:
                if (!(mid->mid_flags & MID_DELETED)) {
                        list_del_init(&mid->qhead);
                        mid->mid_flags |= MID_DELETED;
                }
                cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
                         __func__, mid->mid, mid->mid_state);
                rc = -EIO;
        }
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
        return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
            struct mid_q_entry *mid)
{
        return server->ops->send_cancel ?
                                server->ops->send_cancel(server, rqst, mid) : 0;
}

int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
                   bool log_error)
{
        unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

        dump_smb(mid->resp_buf, min_t(u32, 92, len));

        /* convert the length into a more usable form */
        if (server->sign) {
                struct kvec iov[2];
                int rc = 0;
                struct smb_rqst rqst = { .rq_iov = iov,
                                         .rq_nvec = 2 };

                iov[0].iov_base = mid->resp_buf;
                iov[0].iov_len = 4;
                iov[1].iov_base = (char *)mid->resp_buf + 4;
                iov[1].iov_len = len - 4;
                /* FIXME: add code to kill session */
                rc = cifs_verify_signature(&rqst, server,
                                           mid->sequence_number);
                if (rc)
                        cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
                                 rc);
        }

        /* BB special case reconnect tid and uid here? */
        return map_and_check_smb_error(mid, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
                   struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        rc = allocate_mid(ses, hdr, &mid);
        if (rc)
                return ERR_PTR(rc);
        rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
        if (rc) {
                cifs_delete_mid(mid);
                return ERR_PTR(rc);
        }
        return mid;
}

static void
cifs_compound_callback(struct mid_q_entry *mid)
{
        struct TCP_Server_Info *server = mid->server;
        struct cifs_credits credits;

        credits.value = server->ops->get_credits(mid);
        credits.instance = server->reconnect_instance;

        add_credits(server, &credits, mid->optype);
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        DeleteMidQEntry(mid);
}
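
/*
 * Informational summary (inferred from this file): in a compound chain,
 * every mid except the last gets cifs_compound_callback (credit
 * accounting only), the last gets cifs_compound_last_callback (credit
 * accounting plus waking the waiting thread), and a mid whose wait was
 * cancelled is retargeted to cifs_cancelled_callback so that its entry
 * is freed once the response eventually arrives.
 */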

/*
 * Return a channel (master if none) of @ses that can be used to send
 * regular requests.
 *
 * If we are currently binding a new channel (negprot/sess.setup),
 * return the new incomplete channel.
 */
struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
{
        uint index = 0;

        if (!ses)
                return NULL;

        if (!ses->binding) {
                /* round robin */
                if (ses->chan_count > 1) {
                        index = (uint)atomic_inc_return(&ses->chan_seq);
                        index %= ses->chan_count;
                }
                return ses->chans[index].server;
        } else {
                return cifs_ses_server(ses);
        }
}
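
/*
 * Illustrative behaviour (inferred from the code above): with
 * chan_count == 3, the atomic chan_seq counter makes successive calls
 * return channel indexes e.g. 1, 2, 0, 1, ... so regular requests are
 * spread across all established channels, while a session still binding
 * a new channel always gets that channel via cifs_ses_server().
 */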

int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
                   struct TCP_Server_Info *server,
                   const int flags, const int num_rqst, struct smb_rqst *rqst,
                   int *resp_buf_type, struct kvec *resp_iov)
{
        int i, j, optype, rc = 0;
        struct mid_q_entry *midQ[MAX_COMPOUND];
        bool cancelled_mid[MAX_COMPOUND] = {false};
        struct cifs_credits credits[MAX_COMPOUND] = {
                { .value = 0, .instance = 0 }
        };
        unsigned int instance;
        char *buf;

        optype = flags & CIFS_OP_MASK;

        for (i = 0; i < num_rqst; i++)
                resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

        if (!ses || !ses->server || !server) {
                cifs_dbg(VFS, "Null session\n");
                return -EIO;
        }

        if (server->tcpStatus == CifsExiting)
                return -ENOENT;

        /*
         * Wait for all the requests to become available.
         * This approach still leaves the possibility to be stuck waiting for
         * credits if the server doesn't grant credits to the outstanding
         * requests and if the client is completely idle, not generating any
         * other requests.
         * This can be handled by the eventual session reconnect.
         */
        rc = wait_for_compound_request(server, num_rqst, flags,
                                       &instance);
        if (rc)
                return rc;

        for (i = 0; i < num_rqst; i++) {
                credits[i].value = 1;
                credits[i].instance = instance;
        }

        /*
         * Make sure that we sign in the same order that we send on this socket
         * and avoid races inside tcp sendmsg code that could cause corruption
         * of smb data.
         */

        mutex_lock(&server->srv_mutex);

        /*
         * All the parts of the compound chain must obtain credits from the
         * same session. We can not use credits obtained from the previous
         * session to send this request. Check if there were reconnects after
         * we obtained credits and return -EAGAIN in such cases to let callers
         * handle it.
         */
        if (instance != server->reconnect_instance) {
                mutex_unlock(&server->srv_mutex);
                for (j = 0; j < num_rqst; j++)
                        add_credits(server, &credits[j], optype);
                return -EAGAIN;
        }

        for (i = 0; i < num_rqst; i++) {
                midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
                if (IS_ERR(midQ[i])) {
                        revert_current_mid(server, i);
                        for (j = 0; j < i; j++)
                                cifs_delete_mid(midQ[j]);
                        mutex_unlock(&server->srv_mutex);

                        /* Update # of requests on wire to server */
                        for (j = 0; j < num_rqst; j++)
                                add_credits(server, &credits[j], optype);
                        return PTR_ERR(midQ[i]);
                }

                midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
                midQ[i]->optype = optype;
                /*
                 * Invoke callback for every part of the compound chain
                 * to calculate credits properly. Wake up this thread only when
                 * the last element is received.
                 */
                if (i < num_rqst - 1)
                        midQ[i]->callback = cifs_compound_callback;
                else
                        midQ[i]->callback = cifs_compound_last_callback;
        }
        cifs_in_send_inc(server);
        rc = smb_send_rqst(server, num_rqst, rqst, flags);
        cifs_in_send_dec(server);

        for (i = 0; i < num_rqst; i++)
                cifs_save_when_sent(midQ[i]);

        if (rc < 0) {
                revert_current_mid(server, num_rqst);
                server->sequence_number -= 2;
        }

        mutex_unlock(&server->srv_mutex);

        /*
         * If sending failed for some reason or it is an oplock break that we
         * will not receive a response to - return credits back
         */
        if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
                for (i = 0; i < num_rqst; i++)
                        add_credits(server, &credits[i], optype);
                goto out;
        }

        /*
         * At this point the request is passed to the network stack - we assume
         * that any credits taken from the server structure on the client have
         * been spent and we can't return them back. Once we receive responses
         * we will collect credits granted by the server in the mid callbacks
         * and add those credits to the server structure.
         */

        /*
         * Compounding is never used during session establish.
         */
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
                smb311_update_preauth_hash(ses, rqst[0].rq_iov,
                                           rqst[0].rq_nvec);

        for (i = 0; i < num_rqst; i++) {
                rc = wait_for_response(server, midQ[i]);
                if (rc != 0)
                        break;
        }
        if (rc != 0) {
                for (; i < num_rqst; i++) {
                        cifs_server_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
                                 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
                        send_cancel(server, &rqst[i], midQ[i]);
                        spin_lock(&GlobalMid_Lock);
                        midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
                        if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
                                midQ[i]->callback = cifs_cancelled_callback;
                                cancelled_mid[i] = true;
                                credits[i].value = 0;
                        }
                        spin_unlock(&GlobalMid_Lock);
                }
        }

        for (i = 0; i < num_rqst; i++) {
                if (rc < 0)
                        goto out;

                rc = cifs_sync_mid_result(midQ[i], server);
                if (rc != 0) {
                        /* mark this mid as cancelled to not free it below */
                        cancelled_mid[i] = true;
                        goto out;
                }

                if (!midQ[i]->resp_buf ||
                    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
                        rc = -EIO;
                        cifs_dbg(FYI, "Bad MID state?\n");
                        goto out;
                }

                buf = (char *)midQ[i]->resp_buf;
                resp_iov[i].iov_base = buf;
                resp_iov[i].iov_len = midQ[i]->resp_buf_size +
                        server->vals->header_preamble_size;

                if (midQ[i]->large_buf)
                        resp_buf_type[i] = CIFS_LARGE_BUFFER;
                else
                        resp_buf_type[i] = CIFS_SMALL_BUFFER;

                rc = server->ops->check_receive(midQ[i], server,
                                                     flags & CIFS_LOG_ERROR);

                /* mark it so buf will not be freed by cifs_delete_mid */
                if ((flags & CIFS_NO_RSP_BUF) == 0)
                        midQ[i]->resp_buf = NULL;

        }

        /*
         * Compounding is never used during session establish.
         */
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
                struct kvec iov = {
                        .iov_base = resp_iov[0].iov_base,
                        .iov_len = resp_iov[0].iov_len
                };
                smb311_update_preauth_hash(ses, &iov, 1);
        }

out:
        /*
         * This will dequeue all mids. After this it is important that the
         * demultiplex_thread will not process any of these mids any further.
         * This is prevented above by using a noop callback that will not
         * wake this thread except for the very last PDU.
         */
        for (i = 0; i < num_rqst; i++) {
                if (!cancelled_mid[i])
                        cifs_delete_mid(midQ[i]);
        }

        return rc;
}

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
               struct TCP_Server_Info *server,
               struct smb_rqst *rqst, int *resp_buf_type, const int flags,
               struct kvec *resp_iov)
{
        return compound_send_recv(xid, ses, server, flags, 1,
                                  rqst, resp_buf_type, resp_iov);
}

int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
             struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
             const int flags, struct kvec *resp_iov)
{
        struct smb_rqst rqst;
        struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
        int rc;

        if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
                new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
                                        GFP_KERNEL);
                if (!new_iov) {
                        /* otherwise cifs_send_recv below sets resp_buf_type */
                        *resp_buf_type = CIFS_NO_BUFFER;
                        return -ENOMEM;
                }
        } else
                new_iov = s_iov;

        /* 1st iov is a RFC1001 length followed by the rest of the packet */
        memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

        new_iov[0].iov_base = new_iov[1].iov_base;
        new_iov[0].iov_len = 4;
        new_iov[1].iov_base += 4;
        new_iov[1].iov_len -= 4;

        memset(&rqst, 0, sizeof(struct smb_rqst));
        rqst.rq_iov = new_iov;
        rqst.rq_nvec = n_vec + 1;

        rc = cifs_send_recv(xid, ses, ses->server,
                            &rqst, resp_buf_type, flags, resp_iov);
        if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
                kfree(new_iov);
        return rc;
}
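
/*
 * Informational note (inferred from the code above): SendReceive2 splits
 * the caller's first iovec so that new_iov[0] covers only the 4-byte
 * RFC 1002 length field and new_iov[1] the remainder of that buffer.
 * This matches the layout that cifs_setup_request() and
 * cifs_setup_async_request() verify: rq_iov[0].iov_len == 4 with
 * rq_iov[1] starting exactly 4 bytes into the same buffer.
 */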
1310
1311 int
1312 SendReceive(const unsigned int xid, struct cifs_ses *ses,
1313             struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1314             int *pbytes_returned, const int flags)
1315 {
1316         int rc = 0;
1317         struct mid_q_entry *midQ;
1318         unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1319         struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1320         struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1321         struct cifs_credits credits = { .value = 1, .instance = 0 };
1322         struct TCP_Server_Info *server;
1323
1324         if (ses == NULL) {
1325                 cifs_dbg(VFS, "Null smb session\n");
1326                 return -EIO;
1327         }
1328         server = ses->server;
1329         if (server == NULL) {
1330                 cifs_dbg(VFS, "Null tcp session\n");
1331                 return -EIO;
1332         }
1333
1334         if (server->tcpStatus == CifsExiting)
1335                 return -ENOENT;
1336
1337         /* Ensure that we do not send more than 50 overlapping requests
1338            to the same server. We may make this configurable later or
1339            use ses->maxReq */
1340
1341         if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1342                 cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
1343                                 len);
1344                 return -EIO;
1345         }
1346
1347         rc = wait_for_free_request(server, flags, &credits.instance);
1348         if (rc)
1349                 return rc;
1350
1351         /* make sure that we sign in the same order that we send on this socket
1352            and avoid races inside tcp sendmsg code that could cause corruption
1353            of smb data */
1354
1355         mutex_lock(&server->srv_mutex);
1356
1357         rc = allocate_mid(ses, in_buf, &midQ);
1358         if (rc) {
1359                 mutex_unlock(&server->srv_mutex);
1360                 /* Update # of requests on wire to server */
1361                 add_credits(server, &credits, 0);
1362                 return rc;
1363         }
1364
1365         rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1366         if (rc) {
1367                 mutex_unlock(&server->srv_mutex);
1368                 goto out;
1369         }
1370
1371         midQ->mid_state = MID_REQUEST_SUBMITTED;
1372
1373         cifs_in_send_inc(server);
1374         rc = smb_send(server, in_buf, len);
1375         cifs_in_send_dec(server);
1376         cifs_save_when_sent(midQ);
1377
1378         if (rc < 0)
1379                 server->sequence_number -= 2;
1380
1381         mutex_unlock(&server->srv_mutex);
1382
1383         if (rc < 0)
1384                 goto out;
1385
        rc = wait_for_response(server, midQ);
        if (rc != 0) {
                send_cancel(server, &rqst, midQ);
                spin_lock(&GlobalMid_Lock);
                if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
                        /* no longer considered to be "in-flight" */
                        midQ->callback = DeleteMidQEntry;
                        spin_unlock(&GlobalMid_Lock);
                        add_credits(server, &credits, 0);
                        return rc;
                }
                spin_unlock(&GlobalMid_Lock);
        }

        rc = cifs_sync_mid_result(midQ, server);
        if (rc != 0) {
                add_credits(server, &credits, 0);
                return rc;
        }

        if (!midQ->resp_buf || !out_buf ||
            midQ->mid_state != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cifs_server_dbg(VFS, "Bad MID state?\n");
                goto out;
        }

        *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
        memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
        rc = cifs_check_receive(midQ, server, 0);
out:
        cifs_delete_mid(midQ);
        add_credits(server, &credits, 0);

        return rc;
}
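
/*
 * Example caller pattern (illustrative sketch only, not code from this
 * file): SMB1 callers in cifssmb.c typically build the request with
 * smb_init() and hand both buffers to SendReceive():
 *
 *      rc = SendReceive(xid, ses, (struct smb_hdr *)pSMB,
 *                       (struct smb_hdr *)pSMBr, &bytes_returned, 0);
 *
 * On success, *pbytes_returned holds the RFC1002 payload length of the
 * response; the memcpy above copies those bytes plus the 4-byte RFC1002
 * length header itself.
 */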

/*
 * We send a LOCKINGX_CANCEL_LOCK to cause the Windows
 * blocking lock to return.
 */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
                        struct smb_hdr *in_buf,
                        struct smb_hdr *out_buf)
{
        int bytes_returned;
        struct cifs_ses *ses = tcon->ses;
        LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

        /*
         * We just modify the current in_buf to change the type of lock
         * from LOCKING_ANDX_SHARED_LOCK or LOCKING_ANDX_EXCLUSIVE_LOCK
         * to LOCKING_ANDX_CANCEL_LOCK.
         */

        pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
        pSMB->Timeout = 0;
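        /* the cancel goes out as a new request, so it needs its own mid */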
        pSMB->hdr.Mid = get_next_mid(ses->server);

        return SendReceive(xid, ses, in_buf, out_buf,
                        &bytes_returned, 0);
}

int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
            struct smb_hdr *in_buf, struct smb_hdr *out_buf,
            int *pbytes_returned)
{
        int rc = 0;
        int rstart = 0;
        struct mid_q_entry *midQ;
        struct cifs_ses *ses;
        unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
        struct kvec iov = { .iov_base = in_buf, .iov_len = len };
        struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
        unsigned int instance;
        struct TCP_Server_Info *server;

        if (tcon == NULL || tcon->ses == NULL) {
                cifs_dbg(VFS, "Null smb session\n");
                return -EIO;
        }
        ses = tcon->ses;
        server = ses->server;

        if (server == NULL) {
                cifs_dbg(VFS, "Null tcp session\n");
                return -EIO;
        }

        if (server->tcpStatus == CifsExiting)
                return -ENOENT;

        /*
         * Ensure that we do not send more than 50 overlapping requests
         * to the same server.  We may make this configurable later or
         * use ses->maxReq (the limit is actually enforced by
         * wait_for_free_request() below).
         */

        if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
                cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %u\n",
                              len);
                return -EIO;
        }

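        /*
         * A blocking byte-range lock may legitimately sit on the server
         * for an unbounded time, so CIFS_BLOCKING_OP gets special
         * treatment in wait_for_free_request() rather than pinning an
         * ordinary in-flight request slot indefinitely.
         */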
        rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
        if (rc)
                return rc;

        /*
         * Make sure that we sign in the same order that we send on this
         * socket and avoid races inside tcp sendmsg code that could cause
         * corruption of smb data.
         */

        mutex_lock(&server->srv_mutex);

        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
                mutex_unlock(&server->srv_mutex);
                return rc;
        }

        rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
        if (rc) {
                cifs_delete_mid(midQ);
                mutex_unlock(&server->srv_mutex);
                return rc;
        }

        midQ->mid_state = MID_REQUEST_SUBMITTED;
        cifs_in_send_inc(server);
        rc = smb_send(server, in_buf, len);
        cifs_in_send_dec(server);
        cifs_save_when_sent(midQ);

        if (rc < 0)
                server->sequence_number -= 2;

        mutex_unlock(&server->srv_mutex);

        if (rc < 0) {
                cifs_delete_mid(midQ);
                return rc;
        }

        /* Wait for a reply - allow signals to interrupt. */
        rc = wait_event_interruptible(server->response_q,
                (!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
                ((server->tcpStatus != CifsGood) &&
                 (server->tcpStatus != CifsNew)));

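        /*
         * If a signal arrived while the lock request was still
         * outstanding, the request must be cancelled on the server
         * before this call can back out; which cancel to send depends
         * on the flavour of lock, as handled below.
         */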
        /* Were we interrupted by a signal? */
        if ((rc == -ERESTARTSYS) &&
                (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
                ((server->tcpStatus == CifsGood) ||
                 (server->tcpStatus == CifsNew))) {

                if (in_buf->Command == SMB_COM_TRANSACTION2) {
                        /*
                         * POSIX lock.  We send a NT_CANCEL SMB to cause
                         * the blocking lock to return.
                         */
                        rc = send_cancel(server, &rqst, midQ);
                        if (rc) {
                                cifs_delete_mid(midQ);
                                return rc;
                        }
                } else {
                        /*
                         * Windows lock.  We send a LOCKINGX_CANCEL_LOCK
                         * to cause the blocking lock to return.
                         */

                        rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

                        /*
                         * If we get -ENOLCK back, the lock may have
                         * already been removed.  Don't exit in this case.
                         */
                        if (rc && rc != -ENOLCK) {
                                cifs_delete_mid(midQ);
                                return rc;
                        }
                }

                rc = wait_for_response(server, midQ);
                if (rc) {
                        send_cancel(server, &rqst, midQ);
                        spin_lock(&GlobalMid_Lock);
                        if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
                                /* no longer considered to be "in-flight" */
                                midQ->callback = DeleteMidQEntry;
                                spin_unlock(&GlobalMid_Lock);
                                return rc;
                        }
                        spin_unlock(&GlobalMid_Lock);
                }

                /* We got the response - restart system call. */
                rstart = 1;
        }

        rc = cifs_sync_mid_result(midQ, server);
        if (rc != 0)
                return rc;

        /* received frame is ok */
        if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cifs_tcon_dbg(VFS, "Bad MID state?\n");
                goto out;
        }

        *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
        memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
        rc = cifs_check_receive(midQ, server, 0);
out:
        cifs_delete_mid(midQ);
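        /*
         * When the blocked lock was cancelled after a signal, the
         * server fails the original request (surfacing here as
         * -EACCES); map that to -ERESTARTSYS so the system call is
         * restarted and the lock attempt retried.
         */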
        if (rstart && rc == -EACCES)
                return -ERESTARTSYS;
        return rc;
}
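
/*
 * Example caller pattern (illustrative sketch only): CIFSSMBLock() in
 * cifssmb.c takes this path when the caller asked to wait on a
 * contended byte-range lock, e.g.:
 *
 *      if (waitFlag)
 *              rc = SendReceiveBlockingLock(xid, tcon,
 *                                           (struct smb_hdr *)pSMB,
 *                                           (struct smb_hdr *)pSMBr,
 *                                           &bytes_returned);
 */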