// SPDX-License-Identifier: LGPL-2.1
/*
 *   fs/cifs/transport.c
 *
 *   Copyright (C) International Business Machines Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}

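/*
 * Allocate a mid (multiplex id) entry for a request about to be sent on
 * @server. Roughly: AllocMidQEntry() takes the initial reference, the
 * entry sits on pending_mid_q while the request is in flight, and the
 * reference is dropped via cifs_mid_q_entry_release() (usually through
 * DeleteMidQEntry()/cifs_delete_mid()) once the caller is done with the
 * response.
 */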
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
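	/*
	 * Note: mempool_alloc() with GFP_NOFS can sleep until an element
	 * becomes available, so temp is not expected to be NULL here and
	 * is not checked before use.
	 */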
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/*	do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	get_task_struct(current);
	temp->creator = current;
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}

static void _cifs_mid_q_entry_release(struct kref *refcount)
{
	struct mid_q_entry *midEntry =
			container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	__u16 smb_cmd = le16_to_cpu(midEntry->command);
	unsigned long now;
	unsigned long roundtrip_time;
#endif
	struct TCP_Server_Info *server = midEntry->server;

	if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
	    midEntry->mid_state == MID_RESPONSE_RECEIVED &&
	    server->ops->handle_cancelled_mid)
		server->ops->handle_cancelled_mid(midEntry, server);

	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	if (time_before(now, midEntry->when_alloc))
		cifs_server_dbg(VFS, "Invalid mid allocation time\n");
	roundtrip_time = now - midEntry->when_alloc;

	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
			server->slowest_cmd[smb_cmd] = roundtrip_time;
			server->fastest_cmd[smb_cmd] = roundtrip_time;
		} else {
			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
				server->slowest_cmd[smb_cmd] = roundtrip_time;
			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
				server->fastest_cmd[smb_cmd] = roundtrip_time;
		}
		cifs_stats_inc(&server->num_cmds[smb_cmd]);
		server->time_per_cmd[smb_cmd] += roundtrip_time;
	}
	/*
	 * commands taking longer than one second (default) can be indications
	 * that something is wrong, unless it is quite a slow link or a very
	 * busy server. Note that this calc is unlikely or impossible to wrap
	 * as long as slow_rsp_threshold is not set way above recommended max
	 * value (32767 ie 9 hours) and is generally harmless even if wrong
	 * since only affects debug counters - so leaving the calc as simple
	 * comparison rather than doing multiple conversions and overflow
	 * checks
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/*
		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
		 * NB: le16_to_cpu returns unsigned so can not be negative below
		 */
		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
			       midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug("slow rsp: cmd %d mid %llu\n",
				 smb_cmd, midEntry->mid);
			cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
				  now - midEntry->when_alloc,
				  now - midEntry->when_sent,
				  now - midEntry->when_received);
		}
	}
#endif
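	/* drop the task reference taken in AllocMidQEntry() */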
	put_task_struct(midEntry->creator);

	mempool_free(midEntry, cifs_mid_poolp);
}

void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
	spin_lock(&GlobalMid_Lock);
	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
	spin_unlock(&GlobalMid_Lock);
}

void DeleteMidQEntry(struct mid_q_entry *midEntry)
{
	cifs_mid_q_entry_release(midEntry);
}

void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	if (!(mid->mid_flags & MID_DELETED)) {
		list_del_init(&mid->qhead);
		mid->mid_flags |= MID_DELETED;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:	Server to send the data to
 * @smb_msg:	Message to send
 * @sent:	amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg->msg_namelen = sizeof(struct sockaddr);
	smb_msg->msg_control = NULL;
	smb_msg->msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear.  The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet.  In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}
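		/*
		 * A rough worst-case for the nonblocking path: retries 1..13
		 * sleep 2, 4, ..., 8192 ms, i.e. 2^1 + ... + 2^13 = 16382 ms
		 * in total, matching the "about 15 seconds" described above,
		 * before -EAGAIN is returned on the 14th retry.
		 */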

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cifs_server_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}

unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	if (server->vals->header_preamble_size == 0 &&
	    rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

	/*
	 * Add in the page array if there is one. The caller needs to make
	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
	 * multiple pages ends at page boundary, rq_tailsz needs to be set to
	 * PAGE_SIZE.
	 */
	if (rqst->rq_npages) {
		if (rqst->rq_npages == 1)
			buflen += rqst->rq_tailsz;
		else {
			/*
			 * If there is more than one page, calculate the
			 * buffer length based on rq_offset and rq_tailsz
			 */
			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
					rqst->rq_offset;
			buflen += rqst->rq_tailsz;
		}
	}

	return buflen;
}
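
/*
 * Example with hypothetical values: for a request with rq_npages = 2,
 * rq_pagesz = 4096, rq_offset = 512 and rq_tailsz = 100, the page array
 * adds 4096 * (2 - 1) - 512 + 100 = 3684 bytes on top of the summed kvec
 * lengths above.
 */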

static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	sigset_t mask, oldmask;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server)) {
		/* return -EAGAIN when connecting or reconnecting */
		rc = -EAGAIN;
		if (server->smbd_conn)
			rc = smbd_send(server, num_rqst, rqst);
		goto smbd_done;
	}

	if (ssocket == NULL)
		return -EAGAIN;

	if (fatal_signal_pending(current)) {
		cifs_dbg(FYI, "signal pending before send request\n");
		return -ERESTARTSYS;
	}

	/* cork the socket */
	tcp_sock_set_cork(ssocket->sk, true);

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/*
	 * We should not allow signals to interrupt the network send because
	 * any partial send will cause session reconnects thus increasing
	 * latency of system calls and overload a server with unnecessary
	 * requests.
	 */

	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len  = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;
		send_length += 4;
	}
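	/*
	 * At this point (SMB2+, no preamble) the wire format is a 4-byte
	 * big-endian length marker followed by the SMB message body:
	 *
	 *   [ be32 length ][ SMB2 header + payload ... ]
	 *
	 * The marker counts only the bytes that follow it, which is why 4
	 * is added to send_length once the marker itself has been sent.
	 */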

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

unmask:
	sigprocmask(SIG_SETMASK, &oldmask, NULL);

	/*
	 * If signal is pending but we have already sent the whole packet to
	 * the server we need to return success status to allow a corresponding
	 * mid entry to be kept in the pending requests queue thus allowing
	 * to handle responses from the server by the client.
	 *
	 * If only part of the packet has been sent there is no need to hide
	 * interrupt because the session will be reconnected anyway, so there
	 * won't be any response from the server to handle.
	 */

	if (signal_pending(current) && (total_len != send_length)) {
		cifs_dbg(FYI, "signal is pending after attempt to send\n");
		rc = -ERESTARTSYS;
	}

	/* uncork it */
	tcp_sock_set_cork(ssocket->sk, false);

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		cifs_signal_cifsd_for_reconnect(server, false);
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->conn_id, server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}

static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr *tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	if (!server->ops->init_transform_rq) {
		cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
		return -EIO;
	}

	tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS);
	if (!tr_hdr)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));
	memset(tr_hdr, 0, sizeof(*tr_hdr));

	iov.iov_base = tr_hdr;
	iov.iov_len = sizeof(*tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;
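
	/*
	 * cur_rqst[0] carries only the transform header; the server's
	 * init_transform_rq op is expected to fill cur_rqst[1..num_rqst]
	 * with the encrypted form of the original requests.
	 */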
	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		goto out;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
out:
	kfree(tr_hdr);
	return rc;
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}

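/*
 * wait_for_free_credits - block until enough send credits are available
 * @server:	server to reserve credits on
 * @num_credits: number of credits the caller needs
 * @timeout:	wait limit in ms; a negative value effectively means
 *		"wait forever" (MAX_JIFFY_OFFSET)
 * @flags:	CIFS_OP_MASK bits select the credit pool via
 *		get_credits_field(); CIFS_NON_BLOCKING requests (e.g. oplock
 *		breaks) are charged immediately without waiting
 * @instance:	on success, set to the server's reconnect_instance so the
 *		caller can detect reconnects that invalidate the credits
 *
 * A summary of the logic below, not separate documentation.
 */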
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
		      const int timeout, const int flags,
		      unsigned int *instance)
{
	long rc;
	int *credits;
	int optype;
	long int t;
	int scredits, in_flight;

	if (timeout < 0)
		t = MAX_JIFFY_OFFSET;
	else
		t = msecs_to_jiffies(timeout);

	optype = flags & CIFS_OP_MASK;

	*instance = 0;

	credits = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*credits <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;

	spin_lock(&server->req_lock);
	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		if (server->in_flight > server->max_in_flight)
			server->max_in_flight = server->in_flight;
		*credits -= 1;
		*instance = server->reconnect_instance;
		scredits = *credits;
		in_flight = server->in_flight;
		spin_unlock(&server->req_lock);

		trace_smb3_add_credits(server->CurrentMid,
				server->conn_id, server->hostname, scredits, -1, in_flight);
		cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
				__func__, 1, scredits);

		return 0;
	}

	while (1) {
		if (*credits < num_credits) {
			scredits = *credits;
			spin_unlock(&server->req_lock);

			cifs_num_waiters_inc(server);
			rc = wait_event_killable_timeout(server->request_q,
				has_credits(server, credits, num_credits), t);
			cifs_num_waiters_dec(server);
			if (!rc) {
				spin_lock(&server->req_lock);
				scredits = *credits;
				in_flight = server->in_flight;
				spin_unlock(&server->req_lock);

				trace_smb3_credit_timeout(server->CurrentMid,
						server->conn_id, server->hostname, scredits,
						num_credits, in_flight);
				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
						timeout);
				return -EBUSY;
			}
			if (rc == -ERESTARTSYS)
				return -ERESTARTSYS;
			spin_lock(&server->req_lock);
		} else {
			spin_unlock(&server->req_lock);

			spin_lock(&cifs_tcp_ses_lock);
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&cifs_tcp_ses_lock);
				return -ENOENT;
			}
			spin_unlock(&cifs_tcp_ses_lock);

			/*
			 * For normal commands, reserve the last MAX_COMPOUND
			 * credits to compound requests.
			 * Otherwise these compounds could be permanently
			 * starved for credits by single-credit requests.
			 *
			 * To prevent spinning the CPU, block this thread until
			 * there are more than MAX_COMPOUND credits available.
			 * But only do this if we already have a lot of
			 * credits in flight to avoid triggering this check
			 * for servers that are slow to hand out credits on
			 * new sessions.
			 */
			spin_lock(&server->req_lock);
			if (!optype && num_credits == 1 &&
			    server->in_flight > 2 * MAX_COMPOUND &&
			    *credits <= MAX_COMPOUND) {
				spin_unlock(&server->req_lock);

				cifs_num_waiters_inc(server);
				rc = wait_event_killable_timeout(
					server->request_q,
					has_credits(server, credits,
						    MAX_COMPOUND + 1),
					t);
				cifs_num_waiters_dec(server);
				if (!rc) {
					spin_lock(&server->req_lock);
					scredits = *credits;
					in_flight = server->in_flight;
					spin_unlock(&server->req_lock);

					trace_smb3_credit_timeout(
							server->CurrentMid,
							server->conn_id, server->hostname,
							scredits, num_credits, in_flight);
					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
							timeout);
					return -EBUSY;
				}
				if (rc == -ERESTARTSYS)
					return -ERESTARTSYS;
				spin_lock(&server->req_lock);
				continue;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
				*credits -= num_credits;
				server->in_flight += num_credits;
				if (server->in_flight > server->max_in_flight)
					server->max_in_flight = server->in_flight;
				*instance = server->reconnect_instance;
			}
			scredits = *credits;
			in_flight = server->in_flight;
			spin_unlock(&server->req_lock);

			trace_smb3_add_credits(server->CurrentMid,
					server->conn_id, server->hostname, scredits,
					-(num_credits), in_flight);
			cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
					__func__, num_credits, scredits);
			break;
		}
	}
	return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
{
	return wait_for_free_credits(server, 1, -1, flags,
				     instance);
}

static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
			  const int flags, unsigned int *instance)
{
	int *credits;
	int scredits, in_flight;

	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

	spin_lock(&server->req_lock);
	scredits = *credits;
	in_flight = server->in_flight;

	if (*credits < num) {
		/*
		 * If the server is tight on resources or just gives us fewer
		 * credits for other reasons (e.g. requests are coming out of
		 * order and the server delays granting more credits until it
		 * processes a missing mid) and we exhausted most available
		 * credits there may be situations when we try to send
		 * a compound request but we don't have enough credits. At this
		 * point the client needs to decide if it should wait for
		 * additional credits or fail the request. If at least one
		 * request is in flight there is a high probability that the
		 * server will return enough credits to satisfy this compound
		 * request.
		 *
		 * Return immediately if no requests in flight since we will be
		 * stuck on waiting for credits.
		 */
		if (server->in_flight == 0) {
			spin_unlock(&server->req_lock);
			trace_smb3_insufficient_credits(server->CurrentMid,
					server->conn_id, server->hostname, scredits,
					num, in_flight);
			cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
					__func__, in_flight, num, scredits);
			return -EDEADLK;
		}
	}
	spin_unlock(&server->req_lock);

	return wait_for_free_credits(server, num, 60000, flags,
				     instance);
}

int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, struct cifs_credits *credits)
{
	*num = size;
	credits->value = 0;
	credits->instance = server->reconnect_instance;
	return 0;
}
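
/*
 * Note: this is the simple flavor of the wait_mtu_credits op used by the
 * original CIFS dialect: it grants the full requested size without
 * consuming credits. SMB2+ supplies its own implementation through
 * server->ops, so treat this as the baseline behavior only.
 */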

static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	spin_lock(&cifs_tcp_ses_lock);
	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
			(in_buf->Command != SMB_COM_NEGOTIATE)) {
			spin_unlock(&cifs_tcp_ses_lock);
			return -EAGAIN;
		}
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
			spin_unlock(&cifs_tcp_ses_lock);
			return -EAGAIN;
		}
		/* else ok - we are shutting down session */
	}
	spin_unlock(&cifs_tcp_ses_lock);

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
				    midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags,
		const struct cifs_credits *exist_credits)
{
	int rc;
	struct mid_q_entry *mid;
	struct cifs_credits credits = { .value = 0, .instance = 0 };
	unsigned int instance;
	int optype;

	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, flags, &instance);
		if (rc)
			return rc;
		credits.value = 1;
		credits.instance = instance;
	} else
		instance = exist_credits->instance;

	mutex_lock(&server->srv_mutex);

	/*
	 * We can't use credits obtained from the previous session to send this
	 * request. Check if there were reconnects after we obtained credits and
	 * return -EAGAIN in such cases to let callers handle it.
	 */
	if (instance != server->reconnect_instance) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return -EAGAIN;
	}

	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, 1, rqst, flags);
	cifs_in_send_dec(server);

	if (rc < 0) {
		revert_current_mid(server, mid->credits);
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, &credits, optype);
	return rc;
}
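
/*
 * For reference: asynchronous requests (for example echo probes and async
 * reads/writes) are expected to go through cifs_call_async(); the supplied
 * callback then runs from the demultiplex thread once a response (or a
 * reconnect) resolves the mid.
 */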

/*
 * Send an SMB Request.  No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RSP_BUF;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}

static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		if (!(mid->mid_flags & MID_DELETED)) {
			list_del_init(&mid->qhead);
			mid->mid_flags |= MID_DELETED;
		}
		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
	    struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
				server->ops->send_cancel(server, rqst, mid) : 0;
}

int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_and_check_smb_error(mid, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
		   struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}

static void
cifs_compound_callback(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->server;
	struct cifs_credits credits;

	credits.value = server->ops->get_credits(mid);
	credits.instance = server->reconnect_instance;

	add_credits(server, &credits, mid->optype);
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	DeleteMidQEntry(mid);
}

/*
 * Return a channel (master if none) of @ses that can be used to send
 * regular requests.
 *
 * If we are currently binding a new channel (negprot/sess.setup),
 * return the new incomplete channel.
 */
struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
{
	uint index = 0;

	if (!ses)
		return NULL;

	/* round robin */
	index = (uint)atomic_inc_return(&ses->chan_seq);

	spin_lock(&ses->chan_lock);
	index %= ses->chan_count;
	spin_unlock(&ses->chan_lock);

	return ses->chans[index].server;
}
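
/*
 * Example with hypothetical values: with chan_count = 3 and chan_seq
 * incremented to 7, index = 7 % 3 = 1, so the second channel is chosen;
 * subsequent calls rotate through indexes 2, 0, 1, ...
 */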

int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   struct TCP_Server_Info *server,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, optype, rc = 0;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	char *buf;

	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if (!ses || !ses->server || !server) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	spin_lock(&cifs_tcp_ses_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&cifs_tcp_ses_lock);
		return -ENOENT;
	}
	spin_unlock(&cifs_tcp_ses_lock);

	/*
	 * Wait for all the requests to become available.
	 * This approach still leaves the possibility to be stuck waiting for
	 * credits if the server doesn't grant credits to the outstanding
	 * requests and if the client is completely idle, not generating any
	 * other requests.
	 * This can be handled by the eventual session reconnect.
	 */
	rc = wait_for_compound_request(server, num_rqst, flags,
				       &instance);
	if (rc)
		return rc;

	for (i = 0; i < num_rqst; i++) {
		credits[i].value = 1;
		credits[i].instance = instance;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&server->srv_mutex);

	/*
	 * All the parts of the compound chain must use credits obtained from
	 * the same session. We can not use credits obtained from the previous
	 * session to send this request. Check if there were reconnects after
	 * we obtained credits and return -EAGAIN in such cases to let callers
	 * handle it.
	 */
	if (instance != server->reconnect_instance) {
		mutex_unlock(&server->srv_mutex);
		for (j = 0; j < num_rqst; j++)
			add_credits(server, &credits[j], optype);
		return -EAGAIN;
	}

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			revert_current_mid(server, i);
			for (j = 0; j < i; j++)
				cifs_delete_mid(midQ[j]);
			mutex_unlock(&server->srv_mutex);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, num_rqst, rqst, flags);
	cifs_in_send_dec(server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		revert_current_mid(server, num_rqst);
		server->sequence_number -= 2;
	}

	mutex_unlock(&server->srv_mutex);

	/*
	 * If sending failed for some reason or it is an oplock break that we
	 * will not receive a response to - return credits back
	 */
	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
		for (i = 0; i < num_rqst; i++)
			add_credits(server, &credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	spin_lock(&cifs_tcp_ses_lock);
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		spin_unlock(&cifs_tcp_ses_lock);

		mutex_lock(&server->srv_mutex);
		smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
		mutex_unlock(&server->srv_mutex);

		spin_lock(&cifs_tcp_ses_lock);
	}
	spin_unlock(&cifs_tcp_ses_lock);

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		for (; i < num_rqst; i++) {
			cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(server, &rqst[i], midQ[i]);
			spin_lock(&GlobalMid_Lock);
			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&GlobalMid_Lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			server->vals->header_preamble_size;

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = server->ops->check_receive(midQ[i], server,
						     flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by cifs_delete_mid */
		if ((flags & CIFS_NO_RSP_BUF) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Compounding is never used during session establish.
	 */
	spin_lock(&cifs_tcp_ses_lock);
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		spin_unlock(&cifs_tcp_ses_lock);
		mutex_lock(&server->srv_mutex);
		smb311_update_preauth_hash(ses, server, &iov, 1);
		mutex_unlock(&server->srv_mutex);
		spin_lock(&cifs_tcp_ses_lock);
	}
	spin_unlock(&cifs_tcp_ses_lock);

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			cifs_delete_mid(midQ[i]);
	}

	return rc;
}

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct TCP_Server_Info *server,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, server, flags, 1,
				  rqst, resp_buf_type, resp_iov);
}

int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;

	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
					GFP_KERNEL);
		if (!new_iov) {
			/* otherwise cifs_send_recv below sets resp_buf_type */
			*resp_buf_type = CIFS_NO_BUFFER;
			return -ENOMEM;
		}
	} else
		new_iov = s_iov;

	/* 1st iov is a RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;
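
	/*
	 * Example with hypothetical values: if the caller passed one kvec of
	 * length 100 covering the whole packet, new_iov[0] now describes the
	 * 4-byte RFC1001 length field and new_iov[1] the remaining 96 bytes
	 * of SMB header and body.
	 */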
1329
1330         memset(&rqst, 0, sizeof(struct smb_rqst));
1331         rqst.rq_iov = new_iov;
1332         rqst.rq_nvec = n_vec + 1;
1333
1334         rc = cifs_send_recv(xid, ses, ses->server,
1335                             &rqst, resp_buf_type, flags, resp_iov);
1336         if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
1337                 kfree(new_iov);
1338         return rc;
1339 }
1340
1341 int
1342 SendReceive(const unsigned int xid, struct cifs_ses *ses,
1343             struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1344             int *pbytes_returned, const int flags)
1345 {
1346         int rc = 0;
1347         struct mid_q_entry *midQ;
1348         unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1349         struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1350         struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1351         struct cifs_credits credits = { .value = 1, .instance = 0 };
1352         struct TCP_Server_Info *server;
1353
1354         if (ses == NULL) {
1355                 cifs_dbg(VFS, "Null smb session\n");
1356                 return -EIO;
1357         }
1358         server = ses->server;
1359         if (server == NULL) {
1360                 cifs_dbg(VFS, "Null tcp session\n");
1361                 return -EIO;
1362         }
1363
1364         spin_lock(&cifs_tcp_ses_lock);
1365         if (server->tcpStatus == CifsExiting) {
1366                 spin_unlock(&cifs_tcp_ses_lock);
1367                 return -ENOENT;
1368         }
1369         spin_unlock(&cifs_tcp_ses_lock);
1370
1371         /* Ensure that we do not send more than 50 overlapping requests
1372            to the same server. We may make this configurable later or
1373            use ses->maxReq */
1374
1375         if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1376                 cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
1377                                 len);
1378                 return -EIO;
1379         }
1380
1381         rc = wait_for_free_request(server, flags, &credits.instance);
1382         if (rc)
1383                 return rc;
1384
1385         /* make sure that we sign in the same order that we send on this socket
1386            and avoid races inside tcp sendmsg code that could cause corruption
1387            of smb data */
1388
1389         mutex_lock(&server->srv_mutex);
1390
1391         rc = allocate_mid(ses, in_buf, &midQ);
1392         if (rc) {
1393                 mutex_unlock(&server->srv_mutex);
1394                 /* Update # of requests on wire to server */
1395                 add_credits(server, &credits, 0);
1396                 return rc;
1397         }
1398
1399         rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1400         if (rc) {
1401                 mutex_unlock(&server->srv_mutex);
1402                 goto out;
1403         }
1404
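        /*
         * Mark the mid as in flight before the bytes hit the wire so that the
         * demultiplex thread can match even an immediate response.
         */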
1405         midQ->mid_state = MID_REQUEST_SUBMITTED;
1406
1407         cifs_in_send_inc(server);
1408         rc = smb_send(server, in_buf, len);
1409         cifs_in_send_dec(server);
1410         cifs_save_when_sent(midQ);
1411
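        /*
         * Signing consumed two sequence numbers, one for this request and one
         * reserved for the response; roll them back if the send failed.
         */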
1412         if (rc < 0)
1413                 server->sequence_number -= 2;
1414
1415         mutex_unlock(&server->srv_mutex);
1416
1417         if (rc < 0)
1418                 goto out;
1419
1420         rc = wait_for_response(server, midQ);
1421         if (rc != 0) {
1422                 send_cancel(server, &rqst, midQ);
1423                 spin_lock(&GlobalMid_Lock);
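                /*
                 * If the mid is still marked as submitted, no response ever
                 * arrived; point its callback at DeleteMidQEntry so the
                 * demultiplex thread frees the entry when the server finally
                 * answers (or the connection is torn down), and return
                 * without touching it again.
                 */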
1424                 if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
1425                         /* no longer considered to be "in-flight" */
1426                         midQ->callback = DeleteMidQEntry;
1427                         spin_unlock(&GlobalMid_Lock);
1428                         add_credits(server, &credits, 0);
1429                         return rc;
1430                 }
1431                 spin_unlock(&GlobalMid_Lock);
1432         }
1433
1434         rc = cifs_sync_mid_result(midQ, server);
1435         if (rc != 0) {
1436                 add_credits(server, &credits, 0);
1437                 return rc;
1438         }
1439
1440         if (!midQ->resp_buf || !out_buf ||
1441             midQ->mid_state != MID_RESPONSE_RECEIVED) {
1442                 rc = -EIO;
1443                 cifs_server_dbg(VFS, "Bad MID state?\n");
1444                 goto out;
1445         }
1446
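        /* get_rfc1002_length() excludes the 4-byte header, so copy it as well */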
1447         *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1448         memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1449         rc = cifs_check_receive(midQ, server, 0);
1450 out:
1451         cifs_delete_mid(midQ);
1452         add_credits(server, &credits, 0);
1453
1454         return rc;
1455 }
1456
1457 /* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
1458    blocking lock to return. */
1459
1460 static int
1461 send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
1462                         struct smb_hdr *in_buf,
1463                         struct smb_hdr *out_buf)
1464 {
1465         int bytes_returned;
1466         struct cifs_ses *ses = tcon->ses;
1467         LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
1468
1469         /* We just modify the current in_buf to change
1470            the type of lock from LOCKING_ANDX_SHARED_LOCK
1471            or LOCKING_ANDX_EXCLUSIVE_LOCK to
1472            LOCKING_ANDX_CANCEL_LOCK. */
1473
1474         pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
1475         pSMB->Timeout = 0;
1476         pSMB->hdr.Mid = get_next_mid(ses->server);
1477
1478         return SendReceive(xid, ses, in_buf, out_buf,
1479                         &bytes_returned, 0);
1480 }
1481
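/*
 * Variant of SendReceive() for blocking byte-range lock requests: the wait
 * for the server's reply is interruptible, and if a signal arrives first we
 * cancel the pending lock (send_cancel() for a POSIX/SMB_COM_TRANSACTION2
 * lock, send_lock_cancel() for a Windows LockingX lock) before deciding
 * whether to restart the system call.
 */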
1482 int
1483 SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
1484             struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1485             int *pbytes_returned)
1486 {
1487         int rc = 0;
1488         int rstart = 0;
1489         struct mid_q_entry *midQ;
1490         struct cifs_ses *ses;
1491         unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1492         struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1493         struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1494         unsigned int instance;
1495         struct TCP_Server_Info *server;
1496
1497         if (tcon == NULL || tcon->ses == NULL) {
1498                 cifs_dbg(VFS, "Null smb session\n");
1499                 return -EIO;
1500         }
1501         ses = tcon->ses;
1502         server = ses->server;
1503
1504         if (server == NULL) {
1505                 cifs_dbg(VFS, "Null tcp session\n");
1506                 return -EIO;
1507         }
1508
1509         spin_lock(&cifs_tcp_ses_lock);
1510         if (server->tcpStatus == CifsExiting) {
1511                 spin_unlock(&cifs_tcp_ses_lock);
1512                 return -ENOENT;
1513         }
1514         spin_unlock(&cifs_tcp_ses_lock);
1515
1516         /* Ensure that we do not send more than 50 overlapping requests to
1517            the same server (wait_for_free_request() below enforces this); we
1518            may make it configurable later or use ses->maxReq. */
1519
1520         if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1521                 cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
1522                               len);
1523                 return -EIO;
1524         }
1525
1526         rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
1527         if (rc)
1528                 return rc;
1529
1530         /* Make sure that we sign in the same order that we send on this
1531            socket and avoid races inside tcp sendmsg code that could cause
1532            corruption of SMB data. */
1533
1534         mutex_lock(&server->srv_mutex);
1535
1536         rc = allocate_mid(ses, in_buf, &midQ);
1537         if (rc) {
1538                 mutex_unlock(&server->srv_mutex);
1539                 return rc;
1540         }
1541
1542         rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1543         if (rc) {
1544                 cifs_delete_mid(midQ);
1545                 mutex_unlock(&server->srv_mutex);
1546                 return rc;
1547         }
1548
1549         midQ->mid_state = MID_REQUEST_SUBMITTED;
1550         cifs_in_send_inc(server);
1551         rc = smb_send(server, in_buf, len);
1552         cifs_in_send_dec(server);
1553         cifs_save_when_sent(midQ);
1554
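        /* as in SendReceive(), undo the two sequence numbers consumed by signing */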
1555         if (rc < 0)
1556                 server->sequence_number -= 2;
1557
1558         mutex_unlock(&server->srv_mutex);
1559
1560         if (rc < 0) {
1561                 cifs_delete_mid(midQ);
1562                 return rc;
1563         }
1564
1565         /* Wait for a reply - allow signals to interrupt. */
1566         rc = wait_event_interruptible(server->response_q,
1567                 (midQ->mid_state != MID_REQUEST_SUBMITTED) ||
1568                 ((server->tcpStatus != CifsGood) &&
1569                  (server->tcpStatus != CifsNew)));
1570
1571         /* Were we interrupted by a signal? */
1572         spin_lock(&cifs_tcp_ses_lock);
1573         if ((rc == -ERESTARTSYS) &&
1574                 (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
1575                 ((server->tcpStatus == CifsGood) ||
1576                  (server->tcpStatus == CifsNew))) {
1577                 spin_unlock(&cifs_tcp_ses_lock);
1578
1579                 if (in_buf->Command == SMB_COM_TRANSACTION2) {
1580                         /* POSIX lock. We send an NT_CANCEL SMB to cause
1581                            the blocking lock to return. */
1582                         rc = send_cancel(server, &rqst, midQ);
1583                         if (rc) {
1584                                 cifs_delete_mid(midQ);
1585                                 return rc;
1586                         }
1587                 } else {
1588                         /* Windows lock. We send a LOCKINGX_CANCEL_LOCK
1589                            to cause the blocking lock to return. */
1590
1591                         rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
1592
1593                         /* If we get -ENOLCK back, the lock may already
1594                            have been removed. Don't exit in this case. */
1595                         if (rc && rc != -ENOLCK) {
1596                                 cifs_delete_mid(midQ);
1597                                 return rc;
1598                         }
1599                 }
1600
1601                 rc = wait_for_response(server, midQ);
1602                 if (rc) {
1603                         send_cancel(server, &rqst, midQ);
1604                         spin_lock(&GlobalMid_Lock);
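                        /*
                         * As in SendReceive(): a mid still marked as submitted
                         * is handed to DeleteMidQEntry so the demultiplex
                         * thread can free it later.
                         */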
1605                         if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
1606                                 /* no longer considered to be "in-flight" */
1607                                 midQ->callback = DeleteMidQEntry;
1608                                 spin_unlock(&GlobalMid_Lock);
1609                                 return rc;
1610                         }
1611                         spin_unlock(&GlobalMid_Lock);
1612                 }
1613
1614                 /* We got the response - restart system call. */
1615                 rstart = 1;
1616                 spin_lock(&cifs_tcp_ses_lock);
1617         }
1618         spin_unlock(&cifs_tcp_ses_lock);
1619
1620         rc = cifs_sync_mid_result(midQ, server);
1621         if (rc != 0)
1622                 return rc;
1623
1624         /* received frame is ok */
1625         if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
1626                 rc = -EIO;
1627                 cifs_tcon_dbg(VFS, "Bad MID state?\n");
1628                 goto out;
1629         }
1630
1631         *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1632         memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1633         rc = cifs_check_receive(midQ, server, 0);
1634 out:
1635         cifs_delete_mid(midQ);
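        /*
         * If we were interrupted and had to cancel, an -EACCES reply to the
         * lock likely just means it is still contended; restart the system
         * call so the blocking wait resumes.
         */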
1636         if (rstart && rc == -EACCES)
1637                 return -ERESTARTSYS;
1638         return rc;
1639 }