/*
 *   fs/cifs/transport.c
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

void
cifs_wake_up_task(struct mid_q_entry *mid)
{
        wake_up_process(mid->callback_data);
}

struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
        struct mid_q_entry *temp;

        if (server == NULL) {
                cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
                return NULL;
        }

        temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
        memset(temp, 0, sizeof(struct mid_q_entry));
        kref_init(&temp->refcount);
        temp->mid = get_mid(smb_buffer);
        temp->pid = current->pid;
        temp->command = cpu_to_le16(smb_buffer->Command);
        cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
        /*      do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
        /* when mid allocated can be before when sent */
        temp->when_alloc = jiffies;
        temp->server = server;

        /*
         * The default is for the mid to be synchronous, so the
         * default callback just wakes up the current task.
         */
        get_task_struct(current);
        temp->creator = current;
        temp->callback = cifs_wake_up_task;
        temp->callback_data = current;

        atomic_inc(&midCount);
        temp->mid_state = MID_REQUEST_ALLOCATED;
        return temp;
}
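
/*
 * Lifecycle sketch (editor's illustration, not an in-tree code path): a mid
 * allocated above is normally queued on server->pending_mid_q, sent, and
 * released once the response has been consumed, e.g.:
 *
 *	struct mid_q_entry *mid = AllocMidQEntry(hdr, server);
 *	if (mid) {
 *		... send request, wait for mid->callback to fire ...
 *		DeleteMidQEntry(mid);	(drops the kref taken by kref_init)
 *	}
 *
 * The default callback (cifs_wake_up_task) simply wakes the allocating
 * task, which matches the synchronous SendReceive* paths later in this file.
 */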

static void _cifs_mid_q_entry_release(struct kref *refcount)
{
        struct mid_q_entry *midEntry =
                        container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
        __le16 command = midEntry->server->vals->lock_cmd;
        __u16 smb_cmd = le16_to_cpu(midEntry->command);
        unsigned long now;
        unsigned long roundtrip_time;
#endif
        struct TCP_Server_Info *server = midEntry->server;

        if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
            midEntry->mid_state == MID_RESPONSE_RECEIVED &&
            server->ops->handle_cancelled_mid)
                server->ops->handle_cancelled_mid(midEntry, server);

        midEntry->mid_state = MID_FREE;
        atomic_dec(&midCount);
        if (midEntry->large_buf)
                cifs_buf_release(midEntry->resp_buf);
        else
                cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
        now = jiffies;
        if (now < midEntry->when_alloc)
                cifs_server_dbg(VFS, "Invalid mid allocation time\n");
        roundtrip_time = now - midEntry->when_alloc;

        if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
                if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
                        server->slowest_cmd[smb_cmd] = roundtrip_time;
                        server->fastest_cmd[smb_cmd] = roundtrip_time;
                } else {
                        if (server->slowest_cmd[smb_cmd] < roundtrip_time)
                                server->slowest_cmd[smb_cmd] = roundtrip_time;
                        else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
                                server->fastest_cmd[smb_cmd] = roundtrip_time;
                }
                cifs_stats_inc(&server->num_cmds[smb_cmd]);
                server->time_per_cmd[smb_cmd] += roundtrip_time;
        }
        /*
         * commands taking longer than one second (default) can be indications
         * that something is wrong, unless it is quite a slow link or a very
         * busy server. Note that this calc is unlikely or impossible to wrap
         * as long as slow_rsp_threshold is not set way above recommended max
         * value (32767 ie 9 hours) and is generally harmless even if wrong
         * since only affects debug counters - so leaving the calc as simple
         * comparison rather than doing multiple conversions and overflow
         * checks
         */
        if ((slow_rsp_threshold != 0) &&
            time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
            (midEntry->command != command)) {
                /*
                 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
                 * NB: le16_to_cpu returns unsigned so can not be negative below
                 */
                if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
                        cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

                trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
                               midEntry->when_sent, midEntry->when_received);
                if (cifsFYI & CIFS_TIMER) {
                        pr_debug("slow rsp: cmd %d mid %llu",
                                 midEntry->command, midEntry->mid);
                        cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
                                  now - midEntry->when_alloc,
                                  now - midEntry->when_sent,
                                  now - midEntry->when_received);
                }
        }
#endif
        put_task_struct(midEntry->creator);

        mempool_free(midEntry, cifs_mid_poolp);
}

void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
        spin_lock(&GlobalMid_Lock);
        kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
        spin_unlock(&GlobalMid_Lock);
}

void DeleteMidQEntry(struct mid_q_entry *midEntry)
{
        cifs_mid_q_entry_release(midEntry);
}

void
cifs_delete_mid(struct mid_q_entry *mid)
{
        spin_lock(&GlobalMid_Lock);
        if (!(mid->mid_flags & MID_DELETED)) {
                list_del_init(&mid->qhead);
                mid->mid_flags |= MID_DELETED;
        }
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:     Server to send the data to
 * @smb_msg:    Message to send
 * @sent:       amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
              size_t *sent)
{
        int rc = 0;
        int retries = 0;
        struct socket *ssocket = server->ssocket;

        *sent = 0;

        smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
        smb_msg->msg_namelen = sizeof(struct sockaddr);
        smb_msg->msg_control = NULL;
        smb_msg->msg_controllen = 0;
        if (server->noblocksnd)
                smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
        else
                smb_msg->msg_flags = MSG_NOSIGNAL;

        while (msg_data_left(smb_msg)) {
                /*
                 * If blocking send, we try 3 times, since each can block
                 * for 5 seconds. For nonblocking we have to try more times,
                 * but wait increasing amounts of time allowing time for
                 * socket to clear.  The overall time we wait in either
                 * case to send on the socket is about 15 seconds.
                 * Similarly we wait for 15 seconds for a response from
                 * the server in SendReceive[2] for the server to send
                 * a response back for most types of requests (except
                 * SMB Write past end of file which can be slow, and
                 * blocking lock operations). NFS waits slightly longer
                 * than CIFS, but this can make it take longer for
                 * nonresponsive servers to be detected and 15 seconds
                 * is more than enough time for modern networks to
                 * send a packet.  In most cases if we fail to send
                 * after the retries we will kill the socket and
                 * reconnect which may clear the network problem.
                 */
                rc = sock_sendmsg(ssocket, smb_msg);
                if (rc == -EAGAIN) {
                        retries++;
                        if (retries >= 14 ||
                            (!server->noblocksnd && (retries > 2))) {
                                cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
                                         ssocket);
                                return -EAGAIN;
                        }
                        msleep(1 << retries);
                        continue;
                }

                if (rc < 0)
                        return rc;

                if (rc == 0) {
                        /* should never happen, letting socket clear before
                           retrying is our only obvious option here */
                        cifs_server_dbg(VFS, "tcp sent no data\n");
                        msleep(500);
                        continue;
                }

                /* send was at least partially successful */
                *sent += rc;
                retries = 0; /* in case we get ENOSPC on the next send */
        }
        return 0;
}
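
/*
 * Timing note (editor's arithmetic): in the nonblocking case the loop above
 * sleeps 1 << retries ms between attempts, i.e. 2 + 4 + ... + 8192 ms for
 * retries 1..13, roughly 16 seconds in total before the 14th consecutive
 * EAGAIN gives up - which is where the "about 15 seconds" figure in the
 * comment comes from.
 */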

unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        unsigned int i;
        struct kvec *iov;
        int nvec;
        unsigned long buflen = 0;

        if (server->vals->header_preamble_size == 0 &&
            rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
                iov = &rqst->rq_iov[1];
                nvec = rqst->rq_nvec - 1;
        } else {
                iov = rqst->rq_iov;
                nvec = rqst->rq_nvec;
        }

        /* total up iov array first */
        for (i = 0; i < nvec; i++)
                buflen += iov[i].iov_len;

        /*
         * Add in the page array if there is one. The caller needs to make
         * sure rq_offset and rq_tailsz are set correctly. If a buffer of
         * multiple pages ends at page boundary, rq_tailsz needs to be set to
         * PAGE_SIZE.
         */
        if (rqst->rq_npages) {
                if (rqst->rq_npages == 1)
                        buflen += rqst->rq_tailsz;
                else {
                        /*
                         * If there is more than one page, calculate the
                         * buffer length based on rq_offset and rq_tailsz
                         */
                        buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
                                        rqst->rq_offset;
                        buflen += rqst->rq_tailsz;
                }
        }

        return buflen;
}
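
/*
 * Worked example (editor's illustration): with rq_npages = 3,
 * rq_pagesz = 4096, rq_offset = 512 and rq_tailsz = 1024, the page array
 * contributes 4096 * 2 - 512 + 1024 = 8704 bytes: a partial first page
 * (4096 - 512 = 3584), one full middle page (4096), and a 1024-byte tail.
 */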

static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
                struct smb_rqst *rqst)
{
        int rc = 0;
        struct kvec *iov;
        int n_vec;
        unsigned int send_length = 0;
        unsigned int i, j;
        sigset_t mask, oldmask;
        size_t total_len = 0, sent, size;
        struct socket *ssocket = server->ssocket;
        struct msghdr smb_msg;
        __be32 rfc1002_marker;

        if (cifs_rdma_enabled(server)) {
                /* return -EAGAIN when connecting or reconnecting */
                rc = -EAGAIN;
                if (server->smbd_conn)
                        rc = smbd_send(server, num_rqst, rqst);
                goto smbd_done;
        }

        if (ssocket == NULL)
                return -EAGAIN;

        if (fatal_signal_pending(current)) {
                cifs_dbg(FYI, "signal pending before send request\n");
                return -ERESTARTSYS;
        }

        /* cork the socket */
        tcp_sock_set_cork(ssocket->sk, true);

        for (j = 0; j < num_rqst; j++)
                send_length += smb_rqst_len(server, &rqst[j]);
        rfc1002_marker = cpu_to_be32(send_length);

        /*
         * We should not allow signals to interrupt the network send because
         * any partial send will cause session reconnects thus increasing
         * latency of system calls and overload a server with unnecessary
         * requests.
         */

        sigfillset(&mask);
        sigprocmask(SIG_BLOCK, &mask, &oldmask);

        /* Generate a rfc1002 marker for SMB2+ */
        if (server->vals->header_preamble_size == 0) {
                struct kvec hiov = {
                        .iov_base = &rfc1002_marker,
                        .iov_len  = 4
                };
                iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;
                send_length += 4;
        }

        cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

        for (j = 0; j < num_rqst; j++) {
                iov = rqst[j].rq_iov;
                n_vec = rqst[j].rq_nvec;

                size = 0;
                for (i = 0; i < n_vec; i++) {
                        dump_smb(iov[i].iov_base, iov[i].iov_len);
                        size += iov[i].iov_len;
                }

                iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;

                /* now walk the page array and send each page in it */
                for (i = 0; i < rqst[j].rq_npages; i++) {
                        struct bio_vec bvec;

                        bvec.bv_page = rqst[j].rq_pages[i];
                        rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
                                             &bvec.bv_offset);

                        iov_iter_bvec(&smb_msg.msg_iter, WRITE,
                                      &bvec, 1, bvec.bv_len);
                        rc = smb_send_kvec(server, &smb_msg, &sent);
                        if (rc < 0)
                                break;

                        total_len += sent;
                }
        }

unmask:
        sigprocmask(SIG_SETMASK, &oldmask, NULL);

        /*
         * If signal is pending but we have already sent the whole packet to
         * the server we need to return success status to allow a corresponding
         * mid entry to be kept in the pending requests queue thus allowing
         * to handle responses from the server by the client.
         *
         * If only part of the packet has been sent there is no need to hide
         * interrupt because the session will be reconnected anyway, so there
         * won't be any response from the server to handle.
         */

        if (signal_pending(current) && (total_len != send_length)) {
                cifs_dbg(FYI, "signal is pending after attempt to send\n");
                rc = -ERESTARTSYS;
        }

        /* uncork it */
        tcp_sock_set_cork(ssocket->sk, false);

        if ((total_len > 0) && (total_len != send_length)) {
                cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
                         send_length, total_len);
                /*
                 * If we have only sent part of an SMB then the next SMB could
                 * be taken as the remainder of this one. We need to kill the
                 * socket so the server throws away the partial SMB
                 */
                server->tcpStatus = CifsNeedReconnect;
                trace_smb3_partial_send_reconnect(server->CurrentMid,
                                                  server->conn_id, server->hostname);
        }
smbd_done:
        if (rc < 0 && rc != -EINTR)
                cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
                         rc);
        else if (rc > 0)
                rc = 0;

        return rc;
}

static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
              struct smb_rqst *rqst, int flags)
{
        struct kvec iov;
        struct smb2_transform_hdr *tr_hdr;
        struct smb_rqst cur_rqst[MAX_COMPOUND];
        int rc;

        if (!(flags & CIFS_TRANSFORM_REQ))
                return __smb_send_rqst(server, num_rqst, rqst);

        if (num_rqst > MAX_COMPOUND - 1)
                return -ENOMEM;

        if (!server->ops->init_transform_rq) {
                cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
                return -EIO;
        }

        tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS);
        if (!tr_hdr)
                return -ENOMEM;

        memset(&cur_rqst[0], 0, sizeof(cur_rqst));
        memset(&iov, 0, sizeof(iov));
        memset(tr_hdr, 0, sizeof(*tr_hdr));

        iov.iov_base = tr_hdr;
        iov.iov_len = sizeof(*tr_hdr);
        cur_rqst[0].rq_iov = &iov;
        cur_rqst[0].rq_nvec = 1;

        rc = server->ops->init_transform_rq(server, num_rqst + 1,
                                            &cur_rqst[0], rqst);
        if (rc)
                goto out;

        rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
        smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
out:
        kfree(tr_hdr);
        return rc;
}
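
/*
 * Note (editor's summary of the function above): encrypted (SMB3) traffic is
 * never sent as-is. The init_transform_rq callback builds a new chain whose
 * first iov is the smb2_transform_hdr allocated above, with an encrypted
 * copy of the original payload behind it in cur_rqst[1..], and that chain
 * is what goes to __smb_send_rqst before being freed again.
 */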

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
         unsigned int smb_buf_length)
{
        struct kvec iov[2];
        struct smb_rqst rqst = { .rq_iov = iov,
                                 .rq_nvec = 2 };

        iov[0].iov_base = smb_buffer;
        iov[0].iov_len = 4;
        iov[1].iov_base = (char *)smb_buffer + 4;
        iov[1].iov_len = smb_buf_length;

        return __smb_send_rqst(server, 1, &rqst);
}
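
/*
 * Framing example (editor's illustration): on the wire each message is a
 * 4-byte RFC1002-style session header (in practice a zero type byte plus a
 * big-endian length) followed by the SMB payload, which is why iov[0] above
 * is exactly 4 bytes long:
 *
 *	+---------------+----------------------------------+
 *	| 4-byte header | smb_buf_length bytes of SMB data |
 *	+---------------+----------------------------------+
 */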

static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
                      const int timeout, const int flags,
                      unsigned int *instance)
{
        long rc;
        int *credits;
        int optype;
        long int t;
        int scredits, in_flight;

        if (timeout < 0)
                t = MAX_JIFFY_OFFSET;
        else
                t = msecs_to_jiffies(timeout);

        optype = flags & CIFS_OP_MASK;

        *instance = 0;

        credits = server->ops->get_credits_field(server, optype);
        /* Since an echo is already inflight, no need to wait to send another */
        if (*credits <= 0 && optype == CIFS_ECHO_OP)
                return -EAGAIN;

        spin_lock(&server->req_lock);
        if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
                /* oplock breaks must not be held up */
                server->in_flight++;
                if (server->in_flight > server->max_in_flight)
                        server->max_in_flight = server->in_flight;
                *credits -= 1;
                *instance = server->reconnect_instance;
                scredits = *credits;
                in_flight = server->in_flight;
                spin_unlock(&server->req_lock);

                trace_smb3_add_credits(server->CurrentMid,
                                server->conn_id, server->hostname, scredits, -1, in_flight);
                cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
                                __func__, 1, scredits);

                return 0;
        }

        while (1) {
                if (*credits < num_credits) {
                        scredits = *credits;
                        spin_unlock(&server->req_lock);

                        cifs_num_waiters_inc(server);
                        rc = wait_event_killable_timeout(server->request_q,
                                has_credits(server, credits, num_credits), t);
                        cifs_num_waiters_dec(server);
                        if (!rc) {
                                spin_lock(&server->req_lock);
                                scredits = *credits;
                                in_flight = server->in_flight;
                                spin_unlock(&server->req_lock);

                                trace_smb3_credit_timeout(server->CurrentMid,
                                                server->conn_id, server->hostname, scredits,
                                                num_credits, in_flight);
                                cifs_server_dbg(VFS, "wait timed out after %d ms\n",
                                                timeout);
                                return -EBUSY;
                        }
                        if (rc == -ERESTARTSYS)
                                return -ERESTARTSYS;
                        spin_lock(&server->req_lock);
                } else {
                        if (server->tcpStatus == CifsExiting) {
                                spin_unlock(&server->req_lock);
                                return -ENOENT;
                        }

                        /*
                         * For normal commands, reserve the last MAX_COMPOUND
                         * credits for compound requests.
                         * Otherwise these compounds could be permanently
                         * starved for credits by single-credit requests.
                         *
                         * To prevent spinning CPU, block this thread until
                         * there are >MAX_COMPOUND credits available.
                         * But only do this if we already have a lot of
                         * requests in flight to avoid triggering this check
                         * for servers that are slow to hand out credits on
                         * new sessions.
                         */
                        if (!optype && num_credits == 1 &&
                            server->in_flight > 2 * MAX_COMPOUND &&
                            *credits <= MAX_COMPOUND) {
                                spin_unlock(&server->req_lock);

                                cifs_num_waiters_inc(server);
                                rc = wait_event_killable_timeout(
                                        server->request_q,
                                        has_credits(server, credits,
                                                    MAX_COMPOUND + 1),
                                        t);
                                cifs_num_waiters_dec(server);
                                if (!rc) {
                                        spin_lock(&server->req_lock);
                                        scredits = *credits;
                                        in_flight = server->in_flight;
                                        spin_unlock(&server->req_lock);

                                        trace_smb3_credit_timeout(
                                                        server->CurrentMid,
                                                        server->conn_id, server->hostname,
                                                        scredits, num_credits, in_flight);
                                        cifs_server_dbg(VFS, "wait timed out after %d ms\n",
                                                        timeout);
                                        return -EBUSY;
                                }
                                if (rc == -ERESTARTSYS)
                                        return -ERESTARTSYS;
                                spin_lock(&server->req_lock);
                                continue;
                        }

                        /*
                         * Can not count locking commands against total
                         * as they are allowed to block on server.
                         */

                        /* update # of requests on the wire to server */
                        if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
                                *credits -= num_credits;
                                server->in_flight += num_credits;
                                if (server->in_flight > server->max_in_flight)
                                        server->max_in_flight = server->in_flight;
                                *instance = server->reconnect_instance;
                        }
                        scredits = *credits;
                        in_flight = server->in_flight;
                        spin_unlock(&server->req_lock);

                        trace_smb3_add_credits(server->CurrentMid,
                                        server->conn_id, server->hostname, scredits,
                                        -(num_credits), in_flight);
                        cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
                                        __func__, num_credits, scredits);
                        break;
                }
        }
        return 0;
}
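
/*
 * Example (editor's illustration, assuming the usual MAX_COMPOUND = 5): with
 * 12 requests in flight (> 2 * MAX_COMPOUND) and only 4 credits left
 * (<= MAX_COMPOUND), a single-credit regular request blocks above until at
 * least MAX_COMPOUND + 1 = 6 credits are available, so a compound chain of
 * up to MAX_COMPOUND requests can always eventually be sent.
 */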

static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
                      unsigned int *instance)
{
        return wait_for_free_credits(server, 1, -1, flags,
                                     instance);
}

static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
                          const int flags, unsigned int *instance)
{
        int *credits;
        int scredits, in_flight;

        credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

        spin_lock(&server->req_lock);
        scredits = *credits;
        in_flight = server->in_flight;

        if (*credits < num) {
                /*
                 * If the server is tight on resources or just gives us less
                 * credits for other reasons (e.g. requests are coming out of
                 * order and the server delays granting more credits until it
                 * processes a missing mid) and we exhausted most available
                 * credits there may be situations when we try to send
                 * a compound request but we don't have enough credits. At this
                 * point the client needs to decide if it should wait for
                 * additional credits or fail the request. If at least one
                 * request is in flight there is a high probability that the
                 * server will return enough credits to satisfy this compound
                 * request.
                 *
                 * Return immediately if no requests in flight since we will be
                 * stuck on waiting for credits.
                 */
                if (server->in_flight == 0) {
                        spin_unlock(&server->req_lock);
                        trace_smb3_insufficient_credits(server->CurrentMid,
                                        server->conn_id, server->hostname, scredits,
                                        num, in_flight);
                        cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
                                        __func__, in_flight, num, scredits);
                        return -EDEADLK;
                }
        }
        spin_unlock(&server->req_lock);

        return wait_for_free_credits(server, num, 60000, flags,
                                     instance);
}

int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
                      unsigned int *num, struct cifs_credits *credits)
{
        *num = size;
        credits->value = 0;
        credits->instance = server->reconnect_instance;
        return 0;
}
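
/*
 * Note (editor's comment): this is the default (SMB1) implementation of the
 * wait_mtu_credits op - large reads and writes are granted the full
 * requested size and charged no credits (credits->value = 0). SMB2+ servers
 * are expected to plug in a variant that splits the requested size against
 * the credits the server has actually granted.
 */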

static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
                        struct mid_q_entry **ppmidQ)
{
        if (ses->server->tcpStatus == CifsExiting)
                return -ENOENT;

        if (ses->server->tcpStatus == CifsNeedReconnect) {
                cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
                return -EAGAIN;
        }

        if (ses->status == CifsNew) {
                if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
                        (in_buf->Command != SMB_COM_NEGOTIATE))
                        return -EAGAIN;
                /* else ok - we are setting up session */
        }

        if (ses->status == CifsExiting) {
                /* check if SMB session is bad because we are setting it up */
                if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
                        return -EAGAIN;
                /* else ok - we are shutting down session */
        }

        *ppmidQ = AllocMidQEntry(in_buf, ses->server);
        if (*ppmidQ == NULL)
                return -ENOMEM;
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);
        return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
        int error;

        error = wait_event_freezekillable_unsafe(server->response_q,
                                    midQ->mid_state != MID_REQUEST_SUBMITTED);
        if (error < 0)
                return -ERESTARTSYS;

        return 0;
}

struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        /* enable signing if server requires it */
        if (server->sign)
                hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

        mid = AllocMidQEntry(hdr, server);
        if (mid == NULL)
                return ERR_PTR(-ENOMEM);

        rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
        if (rc) {
                DeleteMidQEntry(mid);
                return ERR_PTR(rc);
        }

        return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
                mid_receive_t *receive, mid_callback_t *callback,
                mid_handle_t *handle, void *cbdata, const int flags,
                const struct cifs_credits *exist_credits)
{
        int rc;
        struct mid_q_entry *mid;
        struct cifs_credits credits = { .value = 0, .instance = 0 };
        unsigned int instance;
        int optype;

        optype = flags & CIFS_OP_MASK;

        if ((flags & CIFS_HAS_CREDITS) == 0) {
                rc = wait_for_free_request(server, flags, &instance);
                if (rc)
                        return rc;
                credits.value = 1;
                credits.instance = instance;
        } else
                instance = exist_credits->instance;

        mutex_lock(&server->srv_mutex);

        /*
         * We can't use credits obtained from the previous session to send this
         * request. Check if there were reconnects after we obtained credits and
         * return -EAGAIN in such cases to let callers handle it.
         */
        if (instance != server->reconnect_instance) {
                mutex_unlock(&server->srv_mutex);
                add_credits_and_wake_if(server, &credits, optype);
                return -EAGAIN;
        }

        mid = server->ops->setup_async_request(server, rqst);
        if (IS_ERR(mid)) {
                mutex_unlock(&server->srv_mutex);
                add_credits_and_wake_if(server, &credits, optype);
                return PTR_ERR(mid);
        }

        mid->receive = receive;
        mid->callback = callback;
        mid->callback_data = cbdata;
        mid->handle = handle;
        mid->mid_state = MID_REQUEST_SUBMITTED;

        /* put it on the pending_mid_q */
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&mid->qhead, &server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);

        /*
         * Need to store the time in mid before calling I/O. For call_async,
         * I/O response may come back and free the mid entry on another thread.
         */
        cifs_save_when_sent(mid);
        cifs_in_send_inc(server);
        rc = smb_send_rqst(server, 1, rqst, flags);
        cifs_in_send_dec(server);

        if (rc < 0) {
                revert_current_mid(server, mid->credits);
                server->sequence_number -= 2;
                cifs_delete_mid(mid);
        }

        mutex_unlock(&server->srv_mutex);

        if (rc == 0)
                return 0;

        add_credits_and_wake_if(server, &credits, optype);
        return rc;
}
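
/*
 * Usage sketch (editor's illustration; the callback name is hypothetical):
 * an asynchronous caller provides a callback that runs in the demultiplex
 * thread when the response arrives (or the session reconnects), e.g.:
 *
 *	static void my_echo_callback(struct mid_q_entry *mid)
 *	{
 *		(inspect mid->mid_state, release credits, then free)
 *		DeleteMidQEntry(mid);
 *	}
 *
 *	rc = cifs_call_async(server, &rqst, NULL, my_echo_callback,
 *			     NULL, NULL, CIFS_ECHO_OP, NULL);
 *
 * This mirrors how the echo path drives cifs_call_async.
 */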

/*
 * Send an SMB Request.  No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
                 char *in_buf, int flags)
{
        int rc;
        struct kvec iov[1];
        struct kvec rsp_iov;
        int resp_buf_type;

        iov[0].iov_base = in_buf;
        iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
        flags |= CIFS_NO_RSP_BUF;
        rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
        cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

        return rc;
}

static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
        int rc = 0;

        cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
                 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

        spin_lock(&GlobalMid_Lock);
        switch (mid->mid_state) {
        case MID_RESPONSE_RECEIVED:
                spin_unlock(&GlobalMid_Lock);
                return rc;
        case MID_RETRY_NEEDED:
                rc = -EAGAIN;
                break;
        case MID_RESPONSE_MALFORMED:
                rc = -EIO;
                break;
        case MID_SHUTDOWN:
                rc = -EHOSTDOWN;
                break;
        default:
                if (!(mid->mid_flags & MID_DELETED)) {
                        list_del_init(&mid->qhead);
                        mid->mid_flags |= MID_DELETED;
                }
                cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
                         __func__, mid->mid, mid->mid_state);
                rc = -EIO;
        }
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
        return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
            struct mid_q_entry *mid)
{
        return server->ops->send_cancel ?
                                server->ops->send_cancel(server, rqst, mid) : 0;
}

int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
                   bool log_error)
{
        unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

        dump_smb(mid->resp_buf, min_t(u32, 92, len));

        /* convert the length into a more usable form */
        if (server->sign) {
                struct kvec iov[2];
                int rc = 0;
                struct smb_rqst rqst = { .rq_iov = iov,
                                         .rq_nvec = 2 };

                iov[0].iov_base = mid->resp_buf;
                iov[0].iov_len = 4;
                iov[1].iov_base = (char *)mid->resp_buf + 4;
                iov[1].iov_len = len - 4;
                /* FIXME: add code to kill session */
                rc = cifs_verify_signature(&rqst, server,
                                           mid->sequence_number);
                if (rc)
                        cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
                                 rc);
        }

        /* BB special case reconnect tid and uid here? */
        return map_and_check_smb_error(mid, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
                   struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        rc = allocate_mid(ses, hdr, &mid);
        if (rc)
                return ERR_PTR(rc);
        rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
        if (rc) {
                cifs_delete_mid(mid);
                return ERR_PTR(rc);
        }
        return mid;
}

static void
cifs_compound_callback(struct mid_q_entry *mid)
{
        struct TCP_Server_Info *server = mid->server;
        struct cifs_credits credits;

        credits.value = server->ops->get_credits(mid);
        credits.instance = server->reconnect_instance;

        add_credits(server, &credits, mid->optype);
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        DeleteMidQEntry(mid);
}

/*
 * Return a channel (master if none) of @ses that can be used to send
 * regular requests.
 *
 * If we are currently binding a new channel (negprot/sess.setup),
 * return the new incomplete channel.
 */
struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
{
        uint index = 0;

        if (!ses)
                return NULL;

        if (!ses->binding) {
                /* round robin */
                if (ses->chan_count > 1) {
                        index = (uint)atomic_inc_return(&ses->chan_seq);
                        index %= ses->chan_count;
                }
                return ses->chans[index].server;
        } else {
                return cifs_ses_server(ses);
        }
}
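
/*
 * Example (editor's illustration): with ses->chan_count = 3 the atomic
 * sequence counter makes successive calls return channels in round-robin
 * order: chan_seq = 4 -> index 4 % 3 = 1, then 5 % 3 = 2, then 6 % 3 = 0,
 * and so on. With a single channel, index always stays 0.
 */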

int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
                   struct TCP_Server_Info *server,
                   const int flags, const int num_rqst, struct smb_rqst *rqst,
                   int *resp_buf_type, struct kvec *resp_iov)
{
        int i, j, optype, rc = 0;
        struct mid_q_entry *midQ[MAX_COMPOUND];
        bool cancelled_mid[MAX_COMPOUND] = {false};
        struct cifs_credits credits[MAX_COMPOUND] = {
                { .value = 0, .instance = 0 }
        };
        unsigned int instance;
        char *buf;

        optype = flags & CIFS_OP_MASK;

        for (i = 0; i < num_rqst; i++)
                resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

        if (!ses || !ses->server || !server) {
                cifs_dbg(VFS, "Null session\n");
                return -EIO;
        }

        if (server->tcpStatus == CifsExiting)
                return -ENOENT;

        /*
         * Wait for all the requests to become available.
         * This approach still leaves the possibility to be stuck waiting for
         * credits if the server doesn't grant credits to the outstanding
         * requests and if the client is completely idle, not generating any
         * other requests.
         * This can be handled by the eventual session reconnect.
         */
        rc = wait_for_compound_request(server, num_rqst, flags,
                                       &instance);
        if (rc)
                return rc;

        for (i = 0; i < num_rqst; i++) {
                credits[i].value = 1;
                credits[i].instance = instance;
        }

        /*
         * Make sure that we sign in the same order that we send on this socket
         * and avoid races inside tcp sendmsg code that could cause corruption
         * of smb data.
         */

        mutex_lock(&server->srv_mutex);

        /*
         * All the parts of the compound chain must use credits obtained from
         * the same session. We can not use credits obtained from the previous
         * session to send this request. Check if there were reconnects after
         * we obtained credits and return -EAGAIN in such cases to let callers
         * handle it.
         */
        if (instance != server->reconnect_instance) {
                mutex_unlock(&server->srv_mutex);
                for (j = 0; j < num_rqst; j++)
                        add_credits(server, &credits[j], optype);
                return -EAGAIN;
        }

        for (i = 0; i < num_rqst; i++) {
                midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
                if (IS_ERR(midQ[i])) {
                        revert_current_mid(server, i);
                        for (j = 0; j < i; j++)
                                cifs_delete_mid(midQ[j]);
                        mutex_unlock(&server->srv_mutex);

                        /* Update # of requests on wire to server */
                        for (j = 0; j < num_rqst; j++)
                                add_credits(server, &credits[j], optype);
                        return PTR_ERR(midQ[i]);
                }

                midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
                midQ[i]->optype = optype;
                /*
                 * Invoke callback for every part of the compound chain
                 * to calculate credits properly. Wake up this thread only when
                 * the last element is received.
                 */
                if (i < num_rqst - 1)
                        midQ[i]->callback = cifs_compound_callback;
                else
                        midQ[i]->callback = cifs_compound_last_callback;
        }
        cifs_in_send_inc(server);
        rc = smb_send_rqst(server, num_rqst, rqst, flags);
        cifs_in_send_dec(server);

        for (i = 0; i < num_rqst; i++)
                cifs_save_when_sent(midQ[i]);

        if (rc < 0) {
                revert_current_mid(server, num_rqst);
                server->sequence_number -= 2;
        }

        mutex_unlock(&server->srv_mutex);

        /*
         * If sending failed for some reason or it is an oplock break that we
         * will not receive a response to - return credits back
         */
        if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
                for (i = 0; i < num_rqst; i++)
                        add_credits(server, &credits[i], optype);
                goto out;
        }

        /*
         * At this point the request is passed to the network stack - we assume
         * that any credits taken from the server structure on the client have
         * been spent and we can't return them back. Once we receive responses
         * we will collect credits granted by the server in the mid callbacks
         * and add those credits to the server structure.
         */

        /*
         * Compounding is never used during session establish.
         */
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
                mutex_lock(&server->srv_mutex);
                smb311_update_preauth_hash(ses, rqst[0].rq_iov,
                                           rqst[0].rq_nvec);
                mutex_unlock(&server->srv_mutex);
        }

        for (i = 0; i < num_rqst; i++) {
                rc = wait_for_response(server, midQ[i]);
                if (rc != 0)
                        break;
        }
        if (rc != 0) {
                for (; i < num_rqst; i++) {
                        cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
                                 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
                        send_cancel(server, &rqst[i], midQ[i]);
                        spin_lock(&GlobalMid_Lock);
                        midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
                        if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
                                midQ[i]->callback = cifs_cancelled_callback;
                                cancelled_mid[i] = true;
                                credits[i].value = 0;
                        }
                        spin_unlock(&GlobalMid_Lock);
                }
        }

        for (i = 0; i < num_rqst; i++) {
                if (rc < 0)
                        goto out;

                rc = cifs_sync_mid_result(midQ[i], server);
                if (rc != 0) {
                        /* mark this mid as cancelled to not free it below */
                        cancelled_mid[i] = true;
                        goto out;
                }

                if (!midQ[i]->resp_buf ||
                    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
                        rc = -EIO;
                        cifs_dbg(FYI, "Bad MID state?\n");
                        goto out;
                }

                buf = (char *)midQ[i]->resp_buf;
                resp_iov[i].iov_base = buf;
                resp_iov[i].iov_len = midQ[i]->resp_buf_size +
                        server->vals->header_preamble_size;

                if (midQ[i]->large_buf)
                        resp_buf_type[i] = CIFS_LARGE_BUFFER;
                else
                        resp_buf_type[i] = CIFS_SMALL_BUFFER;

                rc = server->ops->check_receive(midQ[i], server,
                                                     flags & CIFS_LOG_ERROR);

                /* mark it so buf will not be freed by cifs_delete_mid */
                if ((flags & CIFS_NO_RSP_BUF) == 0)
                        midQ[i]->resp_buf = NULL;

        }

        /*
         * Compounding is never used during session establish.
         */
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
                struct kvec iov = {
                        .iov_base = resp_iov[0].iov_base,
                        .iov_len = resp_iov[0].iov_len
                };
                mutex_lock(&server->srv_mutex);
                smb311_update_preauth_hash(ses, &iov, 1);
                mutex_unlock(&server->srv_mutex);
        }

out:
        /*
         * This will dequeue all mids. After this it is important that the
         * demultiplex_thread will not process any of these mids any further.
         * This is prevented above by using a noop callback that will not
         * wake this thread except for the very last PDU.
         */
        for (i = 0; i < num_rqst; i++) {
                if (!cancelled_mid[i])
                        cifs_delete_mid(midQ[i]);
        }

        return rc;
}
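
/*
 * Compounding sketch (editor's illustration): SMB2+ callers typically chain
 * related PDUs, for example create + query-info + close, into a single
 * compound_send_recv() call with num_rqst = 3, so one round trip and the
 * credit accounting above cover all three requests at once.
 */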

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
               struct TCP_Server_Info *server,
               struct smb_rqst *rqst, int *resp_buf_type, const int flags,
               struct kvec *resp_iov)
{
        return compound_send_recv(xid, ses, server, flags, 1,
                                  rqst, resp_buf_type, resp_iov);
}

int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
             struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
             const int flags, struct kvec *resp_iov)
{
        struct smb_rqst rqst;
        struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
        int rc;

        if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
                new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
                                        GFP_KERNEL);
                if (!new_iov) {
                        /* otherwise cifs_send_recv below sets resp_buf_type */
                        *resp_buf_type = CIFS_NO_BUFFER;
                        return -ENOMEM;
                }
        } else
                new_iov = s_iov;

        /* 1st iov is a RFC1001 length followed by the rest of the packet */
        memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

        new_iov[0].iov_base = new_iov[1].iov_base;
        new_iov[0].iov_len = 4;
        new_iov[1].iov_base += 4;
        new_iov[1].iov_len -= 4;

        memset(&rqst, 0, sizeof(struct smb_rqst));
        rqst.rq_iov = new_iov;
        rqst.rq_nvec = n_vec + 1;

        rc = cifs_send_recv(xid, ses, ses->server,
                            &rqst, resp_buf_type, flags, resp_iov);
        if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
                kfree(new_iov);
        return rc;
}
1340
1341 int
1342 SendReceive(const unsigned int xid, struct cifs_ses *ses,
1343             struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1344             int *pbytes_returned, const int flags)
1345 {
1346         int rc = 0;
1347         struct mid_q_entry *midQ;
1348         unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1349         struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1350         struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1351         struct cifs_credits credits = { .value = 1, .instance = 0 };
1352         struct TCP_Server_Info *server;
1353
1354         if (ses == NULL) {
1355                 cifs_dbg(VFS, "Null smb session\n");
1356                 return -EIO;
1357         }
1358         server = ses->server;
1359         if (server == NULL) {
1360                 cifs_dbg(VFS, "Null tcp session\n");
1361                 return -EIO;
1362         }
1363
1364         if (server->tcpStatus == CifsExiting)
1365                 return -ENOENT;
1366
1367         /* Ensure that we do not send more than 50 overlapping requests
1368            to the same server. We may make this configurable later or
1369            use ses->maxReq */
1370
1371         if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1372                 cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
1373                                 len);
1374                 return -EIO;
1375         }
1376
1377         rc = wait_for_free_request(server, flags, &credits.instance);
1378         if (rc)
1379                 return rc;
1380
1381         /* make sure that we sign in the same order that we send on this socket
1382            and avoid races inside tcp sendmsg code that could cause corruption
1383            of smb data */
1384
1385         mutex_lock(&server->srv_mutex);
1386
1387         rc = allocate_mid(ses, in_buf, &midQ);
1388         if (rc) {
1389                 mutex_unlock(&server->srv_mutex);
1390                 /* Update # of requests on wire to server */
1391                 add_credits(server, &credits, 0);
1392                 return rc;
1393         }
1394
1395         rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1396         if (rc) {
1397                 mutex_unlock(&server->srv_mutex);
1398                 goto out;
1399         }
1400
1401         midQ->mid_state = MID_REQUEST_SUBMITTED;
1402
1403         cifs_in_send_inc(server);
1404         rc = smb_send(server, in_buf, len);
1405         cifs_in_send_dec(server);
1406         cifs_save_when_sent(midQ);
1407
1408         if (rc < 0)
1409                 server->sequence_number -= 2; /* undo the two sequence numbers reserved at signing (request + response) */
1410
1411         mutex_unlock(&server->srv_mutex);
1412
1413         if (rc < 0)
1414                 goto out;
1415
1416         rc = wait_for_response(server, midQ);
1417         if (rc != 0) {
1418                 send_cancel(server, &rqst, midQ);
1419                 spin_lock(&GlobalMid_Lock);
1420                 if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
1421                         /* no longer considered to be "in-flight" */
1422                         midQ->callback = DeleteMidQEntry;
1423                         spin_unlock(&GlobalMid_Lock);
1424                         add_credits(server, &credits, 0);
1425                         return rc;
1426                 }
1427                 spin_unlock(&GlobalMid_Lock);
1428         }
1429
1430         rc = cifs_sync_mid_result(midQ, server);
1431         if (rc != 0) {
1432                 add_credits(server, &credits, 0);
1433                 return rc;
1434         }
1435
1436         if (!midQ->resp_buf || !out_buf ||
1437             midQ->mid_state != MID_RESPONSE_RECEIVED) {
1438                 rc = -EIO;
1439                 cifs_server_dbg(VFS, "Bad MID state?\n");
1440                 goto out;
1441         }
1442
1443         *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1444         memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1445         rc = cifs_check_receive(midQ, server, 0);
1446 out:
1447         cifs_delete_mid(midQ);
1448         add_credits(server, &credits, 0);
1449
1450         return rc;
1451 }
1452
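/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * SMB1 round trip through SendReceive() above. The request frame
 * carries its RFC1001 length in the first four bytes, and the response
 * buffer must be able to hold a maximum-size frame, since the complete
 * reply is copied back before the mid is deleted.
 */
static int example_smb1_roundtrip(const unsigned int xid,
				  struct cifs_ses *ses,
				  struct smb_hdr *req, struct smb_hdr *rsp)
{
	int bytes_returned = 0;
	int rc;

	/* flags == 0: ordinary blocking request charged one credit */
	rc = SendReceive(xid, ses, req, rsp, &bytes_returned, 0);
	if (rc == 0)
		cifs_dbg(FYI, "reply of %d bytes after RFC1001 header\n",
			 bytes_returned);
	return rc;
}
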
1453 /* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
1454    blocking lock to return. */
1455
1456 static int
1457 send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
1458                         struct smb_hdr *in_buf,
1459                         struct smb_hdr *out_buf)
1460 {
1461         int bytes_returned;
1462         struct cifs_ses *ses = tcon->ses;
1463         LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
1464
1465         /* We just modify the current in_buf to change
1466            the type of lock from LOCKING_ANDX_SHARED_LOCK
1467            or LOCKING_ANDX_EXCLUSIVE_LOCK to
1468            LOCKING_ANDX_CANCEL_LOCK. */
1469
1470         pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
1471         pSMB->Timeout = 0;
1472         pSMB->hdr.Mid = get_next_mid(ses->server);
1473
1474         return SendReceive(xid, ses, in_buf, out_buf,
1475                         &bytes_returned, 0);
1476 }
1477
1478 int
1479 SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
1480             struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1481             int *pbytes_returned)
1482 {
1483         int rc = 0;
1484         int rstart = 0;
1485         struct mid_q_entry *midQ;
1486         struct cifs_ses *ses;
1487         unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1488         struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1489         struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1490         unsigned int instance;
1491         struct TCP_Server_Info *server;
1492
1493         if (tcon == NULL || tcon->ses == NULL) {
1494                 cifs_dbg(VFS, "Null smb session\n");
1495                 return -EIO;
1496         }
1497         ses = tcon->ses;
1498         server = ses->server;
1499
1500         if (server == NULL) {
1501                 cifs_dbg(VFS, "Null tcp session\n");
1502                 return -EIO;
1503         }
1504
1505         if (server->tcpStatus == CifsExiting)
1506                 return -ENOENT;
1507
1508         /* Concurrent requests to the same server are capped (historically
1509            50 simultaneous operations); wait_for_free_request() below blocks
1510            until credit accounting admits this request. */
1511
1512         if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1513                 cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
1514                               len);
1515                 return -EIO;
1516         }
1517
1518         rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
1519         if (rc)
1520                 return rc;
1521
1522         /* Hold srv_mutex so that we sign in the same order that we send on
1523            this socket, avoiding races inside the TCP sendmsg code that could
1524            corrupt the SMB data. */
1525
1526         mutex_lock(&server->srv_mutex);
1527
1528         rc = allocate_mid(ses, in_buf, &midQ);
1529         if (rc) {
1530                 mutex_unlock(&server->srv_mutex);
1531                 return rc;
1532         }
1533
1534         rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1535         if (rc) {
1536                 cifs_delete_mid(midQ);
1537                 mutex_unlock(&server->srv_mutex);
1538                 return rc;
1539         }
1540
1541         midQ->mid_state = MID_REQUEST_SUBMITTED;
1542         cifs_in_send_inc(server);
1543         rc = smb_send(server, in_buf, len);
1544         cifs_in_send_dec(server);
1545         cifs_save_when_sent(midQ);
1546
1547         if (rc < 0)
1548                 server->sequence_number -= 2; /* undo the two sequence numbers reserved at signing */
1549
1550         mutex_unlock(&server->srv_mutex);
1551
1552         if (rc < 0) {
1553                 cifs_delete_mid(midQ);
1554                 return rc;
1555         }
1556
1557         /* Wait for a reply - allow signals to interrupt. */
1558         rc = wait_event_interruptible(server->response_q,
1559                 midQ->mid_state != MID_REQUEST_SUBMITTED ||
1560                 (server->tcpStatus != CifsGood &&
1561                  server->tcpStatus != CifsNew));
1562
1563         /* Were we interrupted by a signal? */
1564         if ((rc == -ERESTARTSYS) &&
1565                 (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
1566                 ((server->tcpStatus == CifsGood) ||
1567                  (server->tcpStatus == CifsNew))) {
1568
1569                 if (in_buf->Command == SMB_COM_TRANSACTION2) {
1570                         /* POSIX lock. We send an NT_CANCEL SMB to cause the
1571                            blocking lock to return. */
1572                         rc = send_cancel(server, &rqst, midQ);
1573                         if (rc) {
1574                                 cifs_delete_mid(midQ);
1575                                 return rc;
1576                         }
1577                 } else {
1578                         /* Windows lock. We send a LOCKINGX_CANCEL_LOCK
1579                            to cause the blocking lock to return. */
1580
1581                         rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
1582
1583                         /* If we get -ENOLCK back, the lock may already
1584                            have been removed; don't exit in this case. */
1585                         if (rc && rc != -ENOLCK) {
1586                                 cifs_delete_mid(midQ);
1587                                 return rc;
1588                         }
1589                 }
1590
1591                 rc = wait_for_response(server, midQ);
1592                 if (rc) {
1593                         send_cancel(server, &rqst, midQ);
1594                         spin_lock(&GlobalMid_Lock);
1595                         if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
1596                                 /* no longer considered to be "in-flight" */
1597                                 midQ->callback = DeleteMidQEntry;
1598                                 spin_unlock(&GlobalMid_Lock);
1599                                 return rc;
1600                         }
1601                         spin_unlock(&GlobalMid_Lock);
1602                 }
1603
1604                 /* We got the response - restart system call. */
1605                 rstart = 1;
1606         }
1607
1608         rc = cifs_sync_mid_result(midQ, server);
1609         if (rc != 0)
1610                 return rc;
1611
1612         /* received frame is ok */
1613         if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
1614                 rc = -EIO;
1615                 cifs_tcon_dbg(VFS, "Bad MID state?\n");
1616                 goto out;
1617         }
1618
1619         *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1620         memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1621         rc = cifs_check_receive(midQ, server, 0);
1622 out:
1623         cifs_delete_mid(midQ);
1624         if (rstart && rc == -EACCES)
1625                 return -ERESTARTSYS;
1626         return rc;
1627 }
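
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * caller of SendReceiveBlockingLock() above. If a signal interrupted
 * the wait, the in-flight lock is cancelled on the server; a resulting
 * -EACCES is translated to -ERESTARTSYS, so the caller just propagates
 * the return code and lets the VFS restart the system call and retry
 * the lock.
 */
static int example_blocking_lock(const unsigned int xid,
				 struct cifs_tcon *tcon,
				 struct smb_hdr *req, struct smb_hdr *rsp)
{
	int bytes_returned = 0;

	/* blocks until the lock is granted, cancelled, or the caller is
	   signalled; no explicit credit handling is needed here */
	return SendReceiveBlockingLock(xid, tcon, req, rsp, &bytes_returned);
}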