// SPDX-License-Identifier: LGPL-2.1
/*
 *   fs/cifs/transport.c
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

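/*
 * Default mid callback: wake the task that queued the request (stored in
 * mid->callback_data by AllocMidQEntry below).
 */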
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
        wake_up_process(mid->callback_data);
}

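/*
 * Allocate a mid from the mempool and initialize it for a synchronous
 * request: the calling task is recorded as creator and callback_data so
 * the default callback can wake it when the response arrives.
 */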
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
        struct mid_q_entry *temp;

        if (server == NULL) {
                cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
                return NULL;
        }

        temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
        memset(temp, 0, sizeof(struct mid_q_entry));
        kref_init(&temp->refcount);
        temp->mid = get_mid(smb_buffer);
        temp->pid = current->pid;
        temp->command = cpu_to_le16(smb_buffer->Command);
        cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
        /*      do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
        /* when mid allocated can be before when sent */
        temp->when_alloc = jiffies;
        temp->server = server;

        /*
         * The default is for the mid to be synchronous, so the
         * default callback just wakes up the current task.
         */
        get_task_struct(current);
        temp->creator = current;
        temp->callback = cifs_wake_up_task;
        temp->callback_data = current;

        atomic_inc(&midCount);
        temp->mid_state = MID_REQUEST_ALLOCATED;
        return temp;
}

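/*
 * kref release function for a mid: frees the response buffer and the mid
 * itself and, under CONFIG_CIFS_STATS2, updates the per-command round-trip
 * statistics. Runs under GlobalMid_Lock, taken by cifs_mid_q_entry_release.
 */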
static void _cifs_mid_q_entry_release(struct kref *refcount)
{
        struct mid_q_entry *midEntry =
                        container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
        __le16 command = midEntry->server->vals->lock_cmd;
        __u16 smb_cmd = le16_to_cpu(midEntry->command);
        unsigned long now;
        unsigned long roundtrip_time;
#endif
        struct TCP_Server_Info *server = midEntry->server;

        if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
            midEntry->mid_state == MID_RESPONSE_RECEIVED &&
            server->ops->handle_cancelled_mid)
                server->ops->handle_cancelled_mid(midEntry, server);

        midEntry->mid_state = MID_FREE;
        atomic_dec(&midCount);
        if (midEntry->large_buf)
                cifs_buf_release(midEntry->resp_buf);
        else
                cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
        now = jiffies;
        if (now < midEntry->when_alloc)
                cifs_server_dbg(VFS, "Invalid mid allocation time\n");
        roundtrip_time = now - midEntry->when_alloc;

        if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
                if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
                        server->slowest_cmd[smb_cmd] = roundtrip_time;
                        server->fastest_cmd[smb_cmd] = roundtrip_time;
                } else {
                        if (server->slowest_cmd[smb_cmd] < roundtrip_time)
                                server->slowest_cmd[smb_cmd] = roundtrip_time;
                        else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
                                server->fastest_cmd[smb_cmd] = roundtrip_time;
                }
                cifs_stats_inc(&server->num_cmds[smb_cmd]);
                server->time_per_cmd[smb_cmd] += roundtrip_time;
        }
        /*
         * Commands taking longer than one second (default) can be indications
         * that something is wrong, unless it is quite a slow link or a very
         * busy server. Note that this calc is unlikely to wrap as long as
         * slow_rsp_threshold is not set way above the recommended maximum
         * value (32767, i.e. 9 hours), and is generally harmless even if
         * wrong since it only affects debug counters - so we leave the calc
         * as a simple comparison rather than doing multiple conversions and
         * overflow checks.
         */
        if ((slow_rsp_threshold != 0) &&
            time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
            (midEntry->command != command)) {
                /*
                 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
                 * NB: le16_to_cpu returns unsigned so can not be negative below
                 */
                if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
                        cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

                trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
                               midEntry->when_sent, midEntry->when_received);
                if (cifsFYI & CIFS_TIMER) {
                        pr_debug("slow rsp: cmd %d mid %llu",
                                 midEntry->command, midEntry->mid);
                        cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
                                  now - midEntry->when_alloc,
                                  now - midEntry->when_sent,
                                  now - midEntry->when_received);
                }
        }
#endif
        put_task_struct(midEntry->creator);

        mempool_free(midEntry, cifs_mid_poolp);
}

void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
        spin_lock(&GlobalMid_Lock);
        kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
        spin_unlock(&GlobalMid_Lock);
}

void DeleteMidQEntry(struct mid_q_entry *midEntry)
{
        cifs_mid_q_entry_release(midEntry);
}

void
cifs_delete_mid(struct mid_q_entry *mid)
{
        spin_lock(&GlobalMid_Lock);
        if (!(mid->mid_flags & MID_DELETED)) {
                list_del_init(&mid->qhead);
                mid->mid_flags |= MID_DELETED;
        }
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:     Server to send the data to
 * @smb_msg:    Message to send
 * @sent:       amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
              size_t *sent)
{
        int rc = 0;
        int retries = 0;
        struct socket *ssocket = server->ssocket;

        *sent = 0;

        smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
        smb_msg->msg_namelen = sizeof(struct sockaddr);
        smb_msg->msg_control = NULL;
        smb_msg->msg_controllen = 0;
        if (server->noblocksnd)
                smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
        else
                smb_msg->msg_flags = MSG_NOSIGNAL;

        while (msg_data_left(smb_msg)) {
                /*
                 * If this is a blocking send, we try 3 times, since each can
                 * block for 5 seconds. For a nonblocking send we have to try
                 * more times, waiting increasing amounts of time to allow
                 * the socket to clear. The overall time we wait in either
                 * case to send on the socket is about 15 seconds. Similarly
                 * we wait for 15 seconds for a response from the server in
                 * SendReceive[2] for the server to send a response back for
                 * most types of requests (except SMB writes past end of
                 * file, which can be slow, and blocking lock operations).
                 * NFS waits slightly longer than CIFS, but this can make it
                 * take longer for nonresponsive servers to be detected, and
                 * 15 seconds is more than enough time for modern networks to
                 * send a packet. In most cases if we fail to send after the
                 * retries we will kill the socket and reconnect, which may
                 * clear the network problem.
                 */
                rc = sock_sendmsg(ssocket, smb_msg);
                if (rc == -EAGAIN) {
                        retries++;
                        if (retries >= 14 ||
                            (!server->noblocksnd && (retries > 2))) {
                                cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
                                         ssocket);
                                return -EAGAIN;
                        }
                        msleep(1 << retries);
                        continue;
                }

                if (rc < 0)
                        return rc;

                if (rc == 0) {
                        /*
                         * should never happen, letting socket clear before
                         * retrying is our only obvious option here
                         */
                        cifs_server_dbg(VFS, "tcp sent no data\n");
                        msleep(500);
                        continue;
                }

                /* send was at least partially successful */
                *sent += rc;
                retries = 0; /* in case we get ENOSPC on the next send */
        }
        return 0;
}

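/*
 * smb_rqst_len - number of bytes @rqst will occupy on the wire, counting
 * the iov array plus any attached page array. For SMB2+ (no preamble) a
 * leading 4-byte RFC 1002 length iovec is excluded from the total.
 */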
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        unsigned int i;
        struct kvec *iov;
        int nvec;
        unsigned long buflen = 0;

        if (server->vals->header_preamble_size == 0 &&
            rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
                iov = &rqst->rq_iov[1];
                nvec = rqst->rq_nvec - 1;
        } else {
                iov = rqst->rq_iov;
                nvec = rqst->rq_nvec;
        }

        /* total up iov array first */
        for (i = 0; i < nvec; i++)
                buflen += iov[i].iov_len;

        /*
         * Add in the page array if there is one. The caller needs to make
         * sure rq_offset and rq_tailsz are set correctly. If a buffer of
         * multiple pages ends at page boundary, rq_tailsz needs to be set to
         * PAGE_SIZE.
         */
        if (rqst->rq_npages) {
                if (rqst->rq_npages == 1)
                        buflen += rqst->rq_tailsz;
                else {
                        /*
                         * If there is more than one page, calculate the
                         * buffer length based on rq_offset and rq_tailsz
                         */
                        buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
                                        rqst->rq_offset;
                        buflen += rqst->rq_tailsz;
                }
        }

        return buflen;
}

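/*
 * Send one or more requests (a compound chain) on the socket. For SMB2+ a
 * single RFC 1002 length marker covering the whole chain is sent first,
 * and signals are blocked for the duration of the send so that a signal
 * cannot leave the server holding a partial frame.
 */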
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
                struct smb_rqst *rqst)
{
        int rc = 0;
        struct kvec *iov;
        int n_vec;
        unsigned int send_length = 0;
        unsigned int i, j;
        sigset_t mask, oldmask;
        size_t total_len = 0, sent, size;
        struct socket *ssocket = server->ssocket;
        struct msghdr smb_msg;
        __be32 rfc1002_marker;

        if (cifs_rdma_enabled(server)) {
                /* return -EAGAIN when connecting or reconnecting */
                rc = -EAGAIN;
                if (server->smbd_conn)
                        rc = smbd_send(server, num_rqst, rqst);
                goto smbd_done;
        }

        if (ssocket == NULL)
                return -EAGAIN;

        if (fatal_signal_pending(current)) {
                cifs_dbg(FYI, "signal pending before send request\n");
                return -ERESTARTSYS;
        }

        /* cork the socket */
        tcp_sock_set_cork(ssocket->sk, true);

        for (j = 0; j < num_rqst; j++)
                send_length += smb_rqst_len(server, &rqst[j]);
        rfc1002_marker = cpu_to_be32(send_length);

        /*
         * We should not allow signals to interrupt the network send because
         * any partial send will cause session reconnects thus increasing
         * latency of system calls and overload a server with unnecessary
         * requests.
         */

        sigfillset(&mask);
        sigprocmask(SIG_BLOCK, &mask, &oldmask);

        /* Generate a rfc1002 marker for SMB2+ */
        if (server->vals->header_preamble_size == 0) {
                struct kvec hiov = {
                        .iov_base = &rfc1002_marker,
                        .iov_len  = 4
                };
                iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;
                send_length += 4;
        }

        cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

        for (j = 0; j < num_rqst; j++) {
                iov = rqst[j].rq_iov;
                n_vec = rqst[j].rq_nvec;

                size = 0;
                for (i = 0; i < n_vec; i++) {
                        dump_smb(iov[i].iov_base, iov[i].iov_len);
                        size += iov[i].iov_len;
                }

                iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;

                /* now walk the page array and send each page in it */
                for (i = 0; i < rqst[j].rq_npages; i++) {
                        struct bio_vec bvec;

                        bvec.bv_page = rqst[j].rq_pages[i];
                        rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
                                             &bvec.bv_offset);

                        iov_iter_bvec(&smb_msg.msg_iter, WRITE,
                                      &bvec, 1, bvec.bv_len);
                        rc = smb_send_kvec(server, &smb_msg, &sent);
                        if (rc < 0)
                                break;

                        total_len += sent;
                }
        }

unmask:
        sigprocmask(SIG_SETMASK, &oldmask, NULL);

        /*
         * If a signal is pending but we have already sent the whole packet
         * to the server we need to return success status to allow a
         * corresponding mid entry to be kept in the pending requests queue,
         * thus allowing the client to handle responses from the server.
         *
         * If only part of the packet has been sent there is no need to hide
         * the interrupt because the session will be reconnected anyway, so
         * there won't be any response from the server to handle.
         */

        if (signal_pending(current) && (total_len != send_length)) {
                cifs_dbg(FYI, "signal is pending after attempt to send\n");
                rc = -ERESTARTSYS;
        }

        /* uncork it */
        tcp_sock_set_cork(ssocket->sk, false);

        if ((total_len > 0) && (total_len != send_length)) {
                cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
                         send_length, total_len);
                /*
                 * If we have only sent part of an SMB then the next SMB could
                 * be taken as the remainder of this one. We need to kill the
                 * socket so the server throws away the partial SMB.
                 */
                server->tcpStatus = CifsNeedReconnect;
                trace_smb3_partial_send_reconnect(server->CurrentMid,
                                                  server->conn_id, server->hostname);
        }
smbd_done:
        if (rc < 0 && rc != -EINTR)
                cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
                         rc);
        else if (rc > 0)
                rc = 0;

        return rc;
}

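/*
 * Wrapper around __smb_send_rqst: when CIFS_TRANSFORM_REQ is set, prepend
 * a transform header and let the server ops encrypt the chain before it
 * goes out on the wire.
 */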
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
              struct smb_rqst *rqst, int flags)
{
        struct kvec iov;
        struct smb2_transform_hdr *tr_hdr;
        struct smb_rqst cur_rqst[MAX_COMPOUND];
        int rc;

        if (!(flags & CIFS_TRANSFORM_REQ))
                return __smb_send_rqst(server, num_rqst, rqst);

        if (num_rqst > MAX_COMPOUND - 1)
                return -ENOMEM;

        if (!server->ops->init_transform_rq) {
                cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
                return -EIO;
        }

        tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS);
        if (!tr_hdr)
                return -ENOMEM;

        memset(&cur_rqst[0], 0, sizeof(cur_rqst));
        memset(&iov, 0, sizeof(iov));
        memset(tr_hdr, 0, sizeof(*tr_hdr));

        iov.iov_base = tr_hdr;
        iov.iov_len = sizeof(*tr_hdr);
        cur_rqst[0].rq_iov = &iov;
        cur_rqst[0].rq_nvec = 1;

        rc = server->ops->init_transform_rq(server, num_rqst + 1,
                                            &cur_rqst[0], rqst);
        if (rc)
                goto out;

        rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
        smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
out:
        kfree(tr_hdr);
        return rc;
}

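/*
 * Send a single SMB1-style buffer whose first four bytes hold the
 * RFC 1002 length field.
 */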
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
         unsigned int smb_buf_length)
{
        struct kvec iov[2];
        struct smb_rqst rqst = { .rq_iov = iov,
                                 .rq_nvec = 2 };

        iov[0].iov_base = smb_buffer;
        iov[0].iov_len = 4;
        iov[1].iov_base = (char *)smb_buffer + 4;
        iov[1].iov_len = smb_buf_length;

        return __smb_send_rqst(server, 1, &rqst);
}

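/*
 * Flow control: wait until @num_credits credits are available, or until
 * the @timeout in milliseconds expires (a negative timeout waits forever).
 * On success the credits are consumed, in_flight is bumped and *instance
 * records the reconnect generation the credits belong to.
 */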
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
                      const int timeout, const int flags,
                      unsigned int *instance)
{
        long rc;
        int *credits;
        int optype;
        long int t;
        int scredits, in_flight;

        if (timeout < 0)
                t = MAX_JIFFY_OFFSET;
        else
                t = msecs_to_jiffies(timeout);

        optype = flags & CIFS_OP_MASK;

        *instance = 0;

        credits = server->ops->get_credits_field(server, optype);
        /* Since an echo is already inflight, no need to wait to send another */
        if (*credits <= 0 && optype == CIFS_ECHO_OP)
                return -EAGAIN;

        spin_lock(&server->req_lock);
        if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
                /* oplock breaks must not be held up */
                server->in_flight++;
                if (server->in_flight > server->max_in_flight)
                        server->max_in_flight = server->in_flight;
                *credits -= 1;
                *instance = server->reconnect_instance;
                scredits = *credits;
                in_flight = server->in_flight;
                spin_unlock(&server->req_lock);

                trace_smb3_add_credits(server->CurrentMid,
                                server->conn_id, server->hostname, scredits, -1, in_flight);
                cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
                                __func__, 1, scredits);

                return 0;
        }

        while (1) {
                if (*credits < num_credits) {
                        scredits = *credits;
                        spin_unlock(&server->req_lock);

                        cifs_num_waiters_inc(server);
                        rc = wait_event_killable_timeout(server->request_q,
                                has_credits(server, credits, num_credits), t);
                        cifs_num_waiters_dec(server);
                        if (!rc) {
                                spin_lock(&server->req_lock);
                                scredits = *credits;
                                in_flight = server->in_flight;
                                spin_unlock(&server->req_lock);

                                trace_smb3_credit_timeout(server->CurrentMid,
                                                server->conn_id, server->hostname, scredits,
                                                num_credits, in_flight);
                                cifs_server_dbg(VFS, "wait timed out after %d ms\n",
                                                timeout);
                                return -EBUSY;
                        }
                        if (rc == -ERESTARTSYS)
                                return -ERESTARTSYS;
                        spin_lock(&server->req_lock);
                } else {
                        if (server->tcpStatus == CifsExiting) {
                                spin_unlock(&server->req_lock);
                                return -ENOENT;
                        }

                        /*
                         * For normal commands, reserve the last MAX_COMPOUND
                         * credits for compound requests.
                         * Otherwise these compounds could be permanently
                         * starved for credits by single-credit requests.
                         *
                         * To prevent spinning the CPU, block this thread
                         * until there are >MAX_COMPOUND credits available.
                         * But only do this if we already have a lot of
                         * credits in flight to avoid triggering this check
                         * for servers that are slow to hand out credits on
                         * new sessions.
                         */
                        if (!optype && num_credits == 1 &&
                            server->in_flight > 2 * MAX_COMPOUND &&
                            *credits <= MAX_COMPOUND) {
                                spin_unlock(&server->req_lock);

                                cifs_num_waiters_inc(server);
                                rc = wait_event_killable_timeout(
                                        server->request_q,
                                        has_credits(server, credits,
                                                    MAX_COMPOUND + 1),
                                        t);
                                cifs_num_waiters_dec(server);
                                if (!rc) {
                                        spin_lock(&server->req_lock);
                                        scredits = *credits;
                                        in_flight = server->in_flight;
                                        spin_unlock(&server->req_lock);

                                        trace_smb3_credit_timeout(
                                                        server->CurrentMid,
                                                        server->conn_id, server->hostname,
                                                        scredits, num_credits, in_flight);
                                        cifs_server_dbg(VFS, "wait timed out after %d ms\n",
                                                        timeout);
                                        return -EBUSY;
                                }
                                if (rc == -ERESTARTSYS)
                                        return -ERESTARTSYS;
                                spin_lock(&server->req_lock);
                                continue;
                        }

                        /*
                         * Can not count locking commands against the total
                         * as they are allowed to block on the server.
                         */

                        /* update # of requests on the wire to server */
                        if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
                                *credits -= num_credits;
                                server->in_flight += num_credits;
                                if (server->in_flight > server->max_in_flight)
                                        server->max_in_flight = server->in_flight;
                                *instance = server->reconnect_instance;
                        }
                        scredits = *credits;
                        in_flight = server->in_flight;
                        spin_unlock(&server->req_lock);

                        trace_smb3_add_credits(server->CurrentMid,
                                        server->conn_id, server->hostname, scredits,
                                        -(num_credits), in_flight);
                        cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
                                        __func__, num_credits, scredits);
                        break;
                }
        }
        return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
                      unsigned int *instance)
{
        return wait_for_free_credits(server, 1, -1, flags,
                                     instance);
}

static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
                          const int flags, unsigned int *instance)
{
        int *credits;
        int scredits, in_flight;

        credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

        spin_lock(&server->req_lock);
        scredits = *credits;
        in_flight = server->in_flight;

        if (*credits < num) {
                /*
                 * If the server is tight on resources or just gives us less
                 * credits for other reasons (e.g. requests are coming out of
                 * order and the server delays granting more credits until it
                 * processes a missing mid) and we exhausted most available
                 * credits there may be situations when we try to send
                 * a compound request but we don't have enough credits. At this
                 * point the client needs to decide if it should wait for
                 * additional credits or fail the request. If at least one
                 * request is in flight there is a high probability that the
                 * server will return enough credits to satisfy this compound
                 * request.
                 *
                 * Return immediately if no requests are in flight since we
                 * would otherwise be stuck waiting for credits.
                 */
                if (server->in_flight == 0) {
                        spin_unlock(&server->req_lock);
                        trace_smb3_insufficient_credits(server->CurrentMid,
                                        server->conn_id, server->hostname, scredits,
                                        num, in_flight);
                        cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
                                        __func__, in_flight, num, scredits);
                        return -EDEADLK;
                }
        }
        spin_unlock(&server->req_lock);

        return wait_for_free_credits(server, num, 60000, flags,
                                     instance);
}

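/*
 * Default implementation of the wait_mtu_credits server op (large-MTU
 * credit accounting is an SMB2+ concept): grant the full requested size
 * without consuming any credits.
 */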
int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
                      unsigned int *num, struct cifs_credits *credits)
{
        *num = size;
        credits->value = 0;
        credits->instance = server->reconnect_instance;
        return 0;
}

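/*
 * Allocate a mid for an SMB1 request and queue it on pending_mid_q, after
 * checking that the session state allows this command (e.g. only session
 * setup and negotiate are permitted while the session is still new).
 */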
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
                        struct mid_q_entry **ppmidQ)
{
        if (ses->server->tcpStatus == CifsExiting)
                return -ENOENT;

        if (ses->server->tcpStatus == CifsNeedReconnect) {
                cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
                return -EAGAIN;
        }

        if (ses->status == CifsNew) {
                if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
                        (in_buf->Command != SMB_COM_NEGOTIATE))
                        return -EAGAIN;
                /* else ok - we are setting up session */
        }

        if (ses->status == CifsExiting) {
                /* check if SMB session is bad because we are setting it up */
                if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
                        return -EAGAIN;
                /* else ok - we are shutting down session */
        }

        *ppmidQ = AllocMidQEntry(in_buf, ses->server);
        if (*ppmidQ == NULL)
                return -ENOMEM;
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);
        return 0;
}

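/*
 * Sleep (freezable, killable) until the demultiplex thread moves the mid
 * out of MID_REQUEST_SUBMITTED state.
 */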
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
        int error;

        error = wait_event_freezekillable_unsafe(server->response_q,
                                    midQ->mid_state != MID_REQUEST_SUBMITTED);
        if (error < 0)
                return -ERESTARTSYS;

        return 0;
}

struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        /* enable signing if server requires it */
        if (server->sign)
                hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

        mid = AllocMidQEntry(hdr, server);
        if (mid == NULL)
                return ERR_PTR(-ENOMEM);

        rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
        if (rc) {
                DeleteMidQEntry(mid);
                return ERR_PTR(rc);
        }

        return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
                mid_receive_t *receive, mid_callback_t *callback,
                mid_handle_t *handle, void *cbdata, const int flags,
                const struct cifs_credits *exist_credits)
{
        int rc;
        struct mid_q_entry *mid;
        struct cifs_credits credits = { .value = 0, .instance = 0 };
        unsigned int instance;
        int optype;

        optype = flags & CIFS_OP_MASK;

        if ((flags & CIFS_HAS_CREDITS) == 0) {
                rc = wait_for_free_request(server, flags, &instance);
                if (rc)
                        return rc;
                credits.value = 1;
                credits.instance = instance;
        } else
                instance = exist_credits->instance;

        mutex_lock(&server->srv_mutex);

        /*
         * We can't use credits obtained from the previous session to send this
         * request. Check if there were reconnects after we obtained credits and
         * return -EAGAIN in such cases to let callers handle it.
         */
        if (instance != server->reconnect_instance) {
                mutex_unlock(&server->srv_mutex);
                add_credits_and_wake_if(server, &credits, optype);
                return -EAGAIN;
        }

        mid = server->ops->setup_async_request(server, rqst);
        if (IS_ERR(mid)) {
                mutex_unlock(&server->srv_mutex);
                add_credits_and_wake_if(server, &credits, optype);
                return PTR_ERR(mid);
        }

        mid->receive = receive;
        mid->callback = callback;
        mid->callback_data = cbdata;
        mid->handle = handle;
        mid->mid_state = MID_REQUEST_SUBMITTED;

        /* put it on the pending_mid_q */
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&mid->qhead, &server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);

        /*
         * Need to store the time in mid before calling I/O. For call_async,
         * I/O response may come back and free the mid entry on another thread.
         */
        cifs_save_when_sent(mid);
        cifs_in_send_inc(server);
        rc = smb_send_rqst(server, 1, rqst, flags);
        cifs_in_send_dec(server);

        if (rc < 0) {
                revert_current_mid(server, mid->credits);
                server->sequence_number -= 2;
                cifs_delete_mid(mid);
        }

        mutex_unlock(&server->srv_mutex);

        if (rc == 0)
                return 0;

        add_credits_and_wake_if(server, &credits, optype);
        return rc;
}

/*
 * Send an SMB request. No response info (other than the return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait, and
 * whether to log the NT STATUS code (error) before mapping it to a POSIX
 * error.
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
                 char *in_buf, int flags)
{
        int rc;
        struct kvec iov[1];
        struct kvec rsp_iov;
        int resp_buf_type;

        iov[0].iov_base = in_buf;
        iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
        flags |= CIFS_NO_RSP_BUF;
        rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
        cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

        return rc;
}

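/*
 * Translate the final mid state into an errno once the caller has finished
 * waiting. On success the mid is left for the caller to consume; on error
 * it is dequeued (if needed) and released.
 */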
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
        int rc = 0;

        cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
                 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

        spin_lock(&GlobalMid_Lock);
        switch (mid->mid_state) {
        case MID_RESPONSE_RECEIVED:
                spin_unlock(&GlobalMid_Lock);
                return rc;
        case MID_RETRY_NEEDED:
                rc = -EAGAIN;
                break;
        case MID_RESPONSE_MALFORMED:
                rc = -EIO;
                break;
        case MID_SHUTDOWN:
                rc = -EHOSTDOWN;
                break;
        default:
                if (!(mid->mid_flags & MID_DELETED)) {
                        list_del_init(&mid->qhead);
                        mid->mid_flags |= MID_DELETED;
                }
                cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
                         __func__, mid->mid, mid->mid_state);
                rc = -EIO;
        }
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
        return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
            struct mid_q_entry *mid)
{
        return server->ops->send_cancel ?
                                server->ops->send_cancel(server, rqst, mid) : 0;
}

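/*
 * Verify the signature of a received SMB1 response (when signing is in
 * use) and map the status code in it to a POSIX error.
 */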
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
                   bool log_error)
{
        unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

        dump_smb(mid->resp_buf, min_t(u32, 92, len));

        /* convert the length into a more usable form */
        if (server->sign) {
                struct kvec iov[2];
                int rc = 0;
                struct smb_rqst rqst = { .rq_iov = iov,
                                         .rq_nvec = 2 };

                iov[0].iov_base = mid->resp_buf;
                iov[0].iov_len = 4;
                iov[1].iov_base = (char *)mid->resp_buf + 4;
                iov[1].iov_len = len - 4;
                /* FIXME: add code to kill session */
                rc = cifs_verify_signature(&rqst, server,
                                           mid->sequence_number);
                if (rc)
                        cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
                                 rc);
        }

        /* BB special case reconnect tid and uid here? */
        return map_and_check_smb_error(mid, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
                   struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        rc = allocate_mid(ses, hdr, &mid);
        if (rc)
                return ERR_PTR(rc);
        rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
        if (rc) {
                cifs_delete_mid(mid);
                return ERR_PTR(rc);
        }
        return mid;
}

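/*
 * Callback for the interior links of a compound chain: just return the
 * credits granted by this response to the server structure. The last link
 * additionally wakes the waiting thread.
 */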
static void
cifs_compound_callback(struct mid_q_entry *mid)
{
        struct TCP_Server_Info *server = mid->server;
        struct cifs_credits credits;

        credits.value = server->ops->get_credits(mid);
        credits.instance = server->reconnect_instance;

        add_credits(server, &credits, mid->optype);
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        DeleteMidQEntry(mid);
}

/*
 * Return a channel (master if none) of @ses that can be used to send
 * regular requests.
 *
 * If we are currently binding a new channel (negprot/sess.setup),
 * return the new incomplete channel.
 */
struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
{
        uint index = 0;

        if (!ses)
                return NULL;

        if (!ses->binding) {
                /* round robin */
                if (ses->chan_count > 1) {
                        index = (uint)atomic_inc_return(&ses->chan_seq);
                        index %= ses->chan_count;
                }
                return ses->chans[index].server;
        } else {
                return cifs_ses_server(ses);
        }
}

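/*
 * Core synchronous send/receive path: reserve credits, set up and queue up
 * to MAX_COMPOUND mids under srv_mutex, send the chain, then wait for and
 * validate each response, returning unused credits on failure.
 */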
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
                   struct TCP_Server_Info *server,
                   const int flags, const int num_rqst, struct smb_rqst *rqst,
                   int *resp_buf_type, struct kvec *resp_iov)
{
        int i, j, optype, rc = 0;
        struct mid_q_entry *midQ[MAX_COMPOUND];
        bool cancelled_mid[MAX_COMPOUND] = {false};
        struct cifs_credits credits[MAX_COMPOUND] = {
                { .value = 0, .instance = 0 }
        };
        unsigned int instance;
        char *buf;

        optype = flags & CIFS_OP_MASK;

        for (i = 0; i < num_rqst; i++)
                resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

        if (!ses || !ses->server || !server) {
                cifs_dbg(VFS, "Null session\n");
                return -EIO;
        }

        if (server->tcpStatus == CifsExiting)
                return -ENOENT;

        /*
         * Wait for all the requests to become available.
         * This approach still leaves the possibility to be stuck waiting for
         * credits if the server doesn't grant credits to the outstanding
         * requests and if the client is completely idle, not generating any
         * other requests.
         * This can be handled by the eventual session reconnect.
         */
        rc = wait_for_compound_request(server, num_rqst, flags,
                                       &instance);
        if (rc)
                return rc;

        for (i = 0; i < num_rqst; i++) {
                credits[i].value = 1;
                credits[i].instance = instance;
        }

        /*
         * Make sure that we sign in the same order that we send on this socket
         * and avoid races inside tcp sendmsg code that could cause corruption
         * of smb data.
         */

        mutex_lock(&server->srv_mutex);

        /*
         * All the parts of the compound chain must use credits obtained from
         * the same session. We can not use credits obtained from the previous
         * session to send this request. Check if there were reconnects after
         * we obtained credits and return -EAGAIN in such cases to let callers
         * handle it.
         */
        if (instance != server->reconnect_instance) {
                mutex_unlock(&server->srv_mutex);
                for (j = 0; j < num_rqst; j++)
                        add_credits(server, &credits[j], optype);
                return -EAGAIN;
        }

        for (i = 0; i < num_rqst; i++) {
                midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
                if (IS_ERR(midQ[i])) {
                        revert_current_mid(server, i);
                        for (j = 0; j < i; j++)
                                cifs_delete_mid(midQ[j]);
                        mutex_unlock(&server->srv_mutex);

                        /* Update # of requests on wire to server */
                        for (j = 0; j < num_rqst; j++)
                                add_credits(server, &credits[j], optype);
                        return PTR_ERR(midQ[i]);
                }

                midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
                midQ[i]->optype = optype;
                /*
                 * Invoke callback for every part of the compound chain
                 * to calculate credits properly. Wake up this thread only when
                 * the last element is received.
                 */
                if (i < num_rqst - 1)
                        midQ[i]->callback = cifs_compound_callback;
                else
                        midQ[i]->callback = cifs_compound_last_callback;
        }
        cifs_in_send_inc(server);
        rc = smb_send_rqst(server, num_rqst, rqst, flags);
        cifs_in_send_dec(server);

        for (i = 0; i < num_rqst; i++)
                cifs_save_when_sent(midQ[i]);

        if (rc < 0) {
                revert_current_mid(server, num_rqst);
                server->sequence_number -= 2;
        }

        mutex_unlock(&server->srv_mutex);

        /*
         * If sending failed for some reason or it is an oplock break that we
         * will not receive a response to - return credits back
         */
        if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
                for (i = 0; i < num_rqst; i++)
                        add_credits(server, &credits[i], optype);
                goto out;
        }

        /*
         * At this point the request is passed to the network stack - we assume
         * that any credits taken from the server structure on the client have
         * been spent and we can't return them back. Once we receive responses
         * we will collect credits granted by the server in the mid callbacks
         * and add those credits to the server structure.
         */

        /*
         * Compounding is never used during session establish.
         */
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
                mutex_lock(&server->srv_mutex);
                smb311_update_preauth_hash(ses, rqst[0].rq_iov,
                                           rqst[0].rq_nvec);
                mutex_unlock(&server->srv_mutex);
        }

        for (i = 0; i < num_rqst; i++) {
                rc = wait_for_response(server, midQ[i]);
                if (rc != 0)
                        break;
        }
        if (rc != 0) {
                for (; i < num_rqst; i++) {
                        cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
                                 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
                        send_cancel(server, &rqst[i], midQ[i]);
                        spin_lock(&GlobalMid_Lock);
                        midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
                        if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
                                midQ[i]->callback = cifs_cancelled_callback;
                                cancelled_mid[i] = true;
                                credits[i].value = 0;
                        }
                        spin_unlock(&GlobalMid_Lock);
                }
        }

        for (i = 0; i < num_rqst; i++) {
                if (rc < 0)
                        goto out;

                rc = cifs_sync_mid_result(midQ[i], server);
                if (rc != 0) {
                        /* mark this mid as cancelled to not free it below */
                        cancelled_mid[i] = true;
                        goto out;
                }

                if (!midQ[i]->resp_buf ||
                    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
                        rc = -EIO;
                        cifs_dbg(FYI, "Bad MID state?\n");
                        goto out;
                }

                buf = (char *)midQ[i]->resp_buf;
                resp_iov[i].iov_base = buf;
                resp_iov[i].iov_len = midQ[i]->resp_buf_size +
                        server->vals->header_preamble_size;

                if (midQ[i]->large_buf)
                        resp_buf_type[i] = CIFS_LARGE_BUFFER;
                else
                        resp_buf_type[i] = CIFS_SMALL_BUFFER;

                rc = server->ops->check_receive(midQ[i], server,
                                                     flags & CIFS_LOG_ERROR);

                /* mark it so buf will not be freed by cifs_delete_mid */
                if ((flags & CIFS_NO_RSP_BUF) == 0)
                        midQ[i]->resp_buf = NULL;
        }

        /*
         * Compounding is never used during session establish.
         */
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
                struct kvec iov = {
                        .iov_base = resp_iov[0].iov_base,
                        .iov_len = resp_iov[0].iov_len
                };
                mutex_lock(&server->srv_mutex);
                smb311_update_preauth_hash(ses, &iov, 1);
                mutex_unlock(&server->srv_mutex);
        }

out:
        /*
         * This will dequeue all mids. After this it is important that the
         * demultiplex_thread will not process any of these mids any further.
         * This is prevented above by using a noop callback that will not
         * wake this thread except for the very last PDU.
         */
        for (i = 0; i < num_rqst; i++) {
                if (!cancelled_mid[i])
                        cifs_delete_mid(midQ[i]);
        }

        return rc;
}

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
               struct TCP_Server_Info *server,
               struct smb_rqst *rqst, int *resp_buf_type, const int flags,
               struct kvec *resp_iov)
{
        return compound_send_recv(xid, ses, server, flags, 1,
                                  rqst, resp_buf_type, resp_iov);
}

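/*
 * SMB1 entry point taking a bare iovec array: split the RFC 1001 length
 * out of the first iovec and hand the request off to cifs_send_recv.
 */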
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
             struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
             const int flags, struct kvec *resp_iov)
{
        struct smb_rqst rqst;
        struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
        int rc;

        if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
                new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
                                        GFP_KERNEL);
                if (!new_iov) {
                        /* otherwise cifs_send_recv below sets resp_buf_type */
                        *resp_buf_type = CIFS_NO_BUFFER;
                        return -ENOMEM;
                }
        } else
                new_iov = s_iov;

        /* 1st iov is a RFC1001 length followed by the rest of the packet */
        memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

        new_iov[0].iov_base = new_iov[1].iov_base;
        new_iov[0].iov_len = 4;
        new_iov[1].iov_base += 4;
        new_iov[1].iov_len -= 4;

        memset(&rqst, 0, sizeof(struct smb_rqst));
        rqst.rq_iov = new_iov;
        rqst.rq_nvec = n_vec + 1;

        rc = cifs_send_recv(xid, ses, ses->server,
                            &rqst, resp_buf_type, flags, resp_iov);
        if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
                kfree(new_iov);
        return rc;
}

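/*
 * Legacy SMB1 synchronous send/receive for fixed smb_hdr buffers; the
 * response is copied back into @out_buf and its length returned via
 * @pbytes_returned.
 */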
1329 int
1330 SendReceive(const unsigned int xid, struct cifs_ses *ses,
1331             struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1332             int *pbytes_returned, const int flags)
1333 {
1334         int rc = 0;
1335         struct mid_q_entry *midQ;
1336         unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1337         struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1338         struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1339         struct cifs_credits credits = { .value = 1, .instance = 0 };
1340         struct TCP_Server_Info *server;
1341
1342         if (ses == NULL) {
1343                 cifs_dbg(VFS, "Null smb session\n");
1344                 return -EIO;
1345         }
1346         server = ses->server;
1347         if (server == NULL) {
1348                 cifs_dbg(VFS, "Null tcp session\n");
1349                 return -EIO;
1350         }
1351
1352         if (server->tcpStatus == CifsExiting)
1353                 return -ENOENT;
1354
1355         /* Ensure that we do not send more than 50 overlapping requests
1356            to the same server. We may make this configurable later or
1357            use ses->maxReq */
1358
1359         if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1360                 cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
1361                                 len);
1362                 return -EIO;
1363         }
1364
1365         rc = wait_for_free_request(server, flags, &credits.instance);
1366         if (rc)
1367                 return rc;
1368
1369         /* make sure that we sign in the same order that we send on this socket
1370            and avoid races inside tcp sendmsg code that could cause corruption
1371            of smb data */
1372
1373         mutex_lock(&server->srv_mutex);
1374
1375         rc = allocate_mid(ses, in_buf, &midQ);
1376         if (rc) {
1377                 mutex_unlock(&server->srv_mutex);
1378                 /* Update # of requests on wire to server */
1379                 add_credits(server, &credits, 0);
1380                 return rc;
1381         }
1382
1383         rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1384         if (rc) {
1385                 mutex_unlock(&server->srv_mutex);
1386                 goto out;
1387         }
1388
1389         midQ->mid_state = MID_REQUEST_SUBMITTED;
1390
1391         cifs_in_send_inc(server);
1392         rc = smb_send(server, in_buf, len);
1393         cifs_in_send_dec(server);
1394         cifs_save_when_sent(midQ);
1395
1396         if (rc < 0)
1397                 server->sequence_number -= 2;
1398
1399         mutex_unlock(&server->srv_mutex);
1400
1401         if (rc < 0)
1402                 goto out;
1403
1404         rc = wait_for_response(server, midQ);
1405         if (rc != 0) {
1406                 send_cancel(server, &rqst, midQ);
1407                 spin_lock(&GlobalMid_Lock);
1408                 if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
1409                         /* no longer considered to be "in-flight" */
1410                         midQ->callback = DeleteMidQEntry;
1411                         spin_unlock(&GlobalMid_Lock);
1412                         add_credits(server, &credits, 0);
1413                         return rc;
1414                 }
1415                 spin_unlock(&GlobalMid_Lock);
1416         }
1417
1418         rc = cifs_sync_mid_result(midQ, server);
1419         if (rc != 0) {
1420                 add_credits(server, &credits, 0);
1421                 return rc;
1422         }
1423
1424         if (!midQ->resp_buf || !out_buf ||
1425             midQ->mid_state != MID_RESPONSE_RECEIVED) {
1426                 rc = -EIO;
1427                 cifs_server_dbg(VFS, "Bad MID state?\n");
1428                 goto out;
1429         }
1430
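	/* Copy the response, including its 4-byte RFC1002 length field */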
1431         *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1432         memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1433         rc = cifs_check_receive(midQ, server, 0);
1434 out:
1435         cifs_delete_mid(midQ);
1436         add_credits(server, &credits, 0);
1437
1438         return rc;
1439 }
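/*
 * Illustrative sketch (not in the original source): the usual calling
 * pattern for SendReceive().  demo_sync_call() and its parameters are
 * hypothetical; real callers marshal a request into in_buf (obtained
 * from the cifs buffer pools) before issuing it.
 */
static inline int demo_sync_call(const unsigned int xid, struct cifs_ses *ses,
				 struct smb_hdr *in_buf, struct smb_hdr *out_buf)
{
	int bytes_returned = 0;
	int rc;

	rc = SendReceive(xid, ses, in_buf, out_buf, &bytes_returned, 0);
	if (rc)
		return rc;	/* transport, signing or server error */

	/* On success, bytes_returned is the RFC1002 length of the reply */
	cifs_dbg(FYI, "demo: %d response bytes\n", bytes_returned);
	return 0;
}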
1440
/*
 * We send a LOCKINGX_CANCEL_LOCK to cause the Windows
 * blocking lock to return.
 */
1443
1444 static int
1445 send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
1446                         struct smb_hdr *in_buf,
1447                         struct smb_hdr *out_buf)
1448 {
1449         int bytes_returned;
1450         struct cifs_ses *ses = tcon->ses;
1451         LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
1452
	/*
	 * We just modify the current in_buf to change the type of lock
	 * from LOCKING_ANDX_SHARED_LOCK or LOCKING_ANDX_EXCLUSIVE_LOCK
	 * to LOCKING_ANDX_CANCEL_LOCK.
	 */
1457
1458         pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
1459         pSMB->Timeout = 0;
1460         pSMB->hdr.Mid = get_next_mid(ses->server);
1461
1462         return SendReceive(xid, ses, in_buf, out_buf,
1463                         &bytes_returned, 0);
1464 }
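/*
 * Illustrative sketch (not in the original source): send_lock_cancel()
 * only rewrites LockType, Timeout and Mid in the original LOCK_REQ.  A
 * hypothetical predicate like this shows the flag combination the
 * cancel relies on.
 */
static inline bool demo_is_cancel_lock(const LOCK_REQ *pSMB)
{
	return (pSMB->LockType & LOCKING_ANDX_CANCEL_LOCK) &&
	       (pSMB->LockType & LOCKING_ANDX_LARGE_FILES);
}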
1465
1466 int
1467 SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
1468             struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1469             int *pbytes_returned)
1470 {
1471         int rc = 0;
1472         int rstart = 0;
1473         struct mid_q_entry *midQ;
1474         struct cifs_ses *ses;
1475         unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1476         struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1477         struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1478         unsigned int instance;
1479         struct TCP_Server_Info *server;
1480
1481         if (tcon == NULL || tcon->ses == NULL) {
1482                 cifs_dbg(VFS, "Null smb session\n");
1483                 return -EIO;
1484         }
1485         ses = tcon->ses;
1486         server = ses->server;
1487
1488         if (server == NULL) {
1489                 cifs_dbg(VFS, "Null tcp session\n");
1490                 return -EIO;
1491         }
1492
1493         if (server->tcpStatus == CifsExiting)
1494                 return -ENOENT;
1495
	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server.  We may make this configurable later or
	 * use ses->maxReq.
	 */
1499
1500         if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1501                 cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
1502                               len);
1503                 return -EIO;
1504         }
1505
1506         rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
1507         if (rc)
1508                 return rc;
1509
	/*
	 * Make sure that we sign in the same order that we send on this
	 * socket and avoid races inside tcp sendmsg code that could cause
	 * corruption of SMB data.
	 */
1513
1514         mutex_lock(&server->srv_mutex);
1515
1516         rc = allocate_mid(ses, in_buf, &midQ);
1517         if (rc) {
1518                 mutex_unlock(&server->srv_mutex);
1519                 return rc;
1520         }
1521
1522         rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1523         if (rc) {
1524                 cifs_delete_mid(midQ);
1525                 mutex_unlock(&server->srv_mutex);
1526                 return rc;
1527         }
1528
1529         midQ->mid_state = MID_REQUEST_SUBMITTED;
1530         cifs_in_send_inc(server);
1531         rc = smb_send(server, in_buf, len);
1532         cifs_in_send_dec(server);
1533         cifs_save_when_sent(midQ);
1534
1535         if (rc < 0)
1536                 server->sequence_number -= 2;
1537
1538         mutex_unlock(&server->srv_mutex);
1539
1540         if (rc < 0) {
1541                 cifs_delete_mid(midQ);
1542                 return rc;
1543         }
1544
1545         /* Wait for a reply - allow signals to interrupt. */
1546         rc = wait_event_interruptible(server->response_q,
1547                 (!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
1548                 ((server->tcpStatus != CifsGood) &&
1549                  (server->tcpStatus != CifsNew)));
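	/*
	 * The wait above completes when the mid leaves SUBMITTED state
	 * (a reply or an error arrived) or the connection goes down.
	 */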
1550
	/* Were we interrupted by a signal? */
1552         if ((rc == -ERESTARTSYS) &&
1553                 (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
1554                 ((server->tcpStatus == CifsGood) ||
1555                  (server->tcpStatus == CifsNew))) {
1556
1557                 if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/*
			 * POSIX lock.  We send an NT_CANCEL SMB to cause the
			 * blocking lock to return.
			 */
1560                         rc = send_cancel(server, &rqst, midQ);
1561                         if (rc) {
1562                                 cifs_delete_mid(midQ);
1563                                 return rc;
1564                         }
1565                 } else {
			/*
			 * Windows lock.  We send a LOCKINGX_CANCEL_LOCK
			 * to cause the blocking lock to return.
			 */
1568
1569                         rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
1570
			/*
			 * If we get -ENOLCK back, the lock may have already
			 * been removed.  Don't exit in this case.
			 */
1573                         if (rc && rc != -ENOLCK) {
1574                                 cifs_delete_mid(midQ);
1575                                 return rc;
1576                         }
1577                 }
1578
1579                 rc = wait_for_response(server, midQ);
1580                 if (rc) {
1581                         send_cancel(server, &rqst, midQ);
1582                         spin_lock(&GlobalMid_Lock);
1583                         if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
1584                                 /* no longer considered to be "in-flight" */
1585                                 midQ->callback = DeleteMidQEntry;
1586                                 spin_unlock(&GlobalMid_Lock);
1587                                 return rc;
1588                         }
1589                         spin_unlock(&GlobalMid_Lock);
1590                 }
1591
1592                 /* We got the response - restart system call. */
1593                 rstart = 1;
1594         }
1595
1596         rc = cifs_sync_mid_result(midQ, server);
1597         if (rc != 0)
1598                 return rc;
1599
	/* Received frame is OK */
1601         if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
1602                 rc = -EIO;
1603                 cifs_tcon_dbg(VFS, "Bad MID state?\n");
1604                 goto out;
1605         }
1606
1607         *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1608         memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1609         rc = cifs_check_receive(midQ, server, 0);
1610 out:
1611         cifs_delete_mid(midQ);
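	/*
	 * If the lock was cancelled after a signal and the server then
	 * refused it (-EACCES), restart the system call so the blocking
	 * lock is retried.
	 */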
1612         if (rstart && rc == -EACCES)
1613                 return -ERESTARTSYS;
1614         return rc;
1615 }
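/*
 * Illustrative sketch (not in the original source): how a hypothetical
 * caller would honour the -ERESTARTSYS convention implemented above.
 * The VFS reissues the system call, so the blocking lock is simply
 * retried after the signal has been handled.
 */
static inline int demo_blocking_lock(const unsigned int xid,
				     struct cifs_tcon *tcon,
				     struct smb_hdr *in_buf,
				     struct smb_hdr *out_buf)
{
	int bytes_returned = 0;
	int rc;

	rc = SendReceiveBlockingLock(xid, tcon, in_buf, out_buf,
				     &bytes_returned);
	if (rc == -ERESTARTSYS)
		cifs_dbg(FYI, "demo: lock interrupted, syscall will restart\n");
	return rc;
}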