/*
 *   fs/cifs/transport.c
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

void
cifs_wake_up_task(struct mid_q_entry *mid)
{
        wake_up_process(mid->callback_data);
}

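/*
 * Allocate a mid from the mempool and take an initial reference on it.
 * The mid defaults to synchronous use: its callback simply wakes up the
 * allocating task, and a task_struct reference is held until release.
 */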
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
        struct mid_q_entry *temp;

        if (server == NULL) {
                cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
                return NULL;
        }

        temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
        memset(temp, 0, sizeof(struct mid_q_entry));
        kref_init(&temp->refcount);
        temp->mid = get_mid(smb_buffer);
        temp->pid = current->pid;
        temp->command = cpu_to_le16(smb_buffer->Command);
        cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
        /* easier to use jiffies */
        /* when mid allocated can be before when sent */
        temp->when_alloc = jiffies;
        temp->server = server;

        /*
         * The default is for the mid to be synchronous, so the
         * default callback just wakes up the current task.
         */
        get_task_struct(current);
        temp->creator = current;
        temp->callback = cifs_wake_up_task;
        temp->callback_data = current;

        atomic_inc(&midCount);
        temp->mid_state = MID_REQUEST_ALLOCATED;
        return temp;
}

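/*
 * Final kref release: hand a cancelled-but-answered mid to the server ops
 * for cleanup, update the per-command statistics when CONFIG_CIFS_STATS2
 * is set, free the response buffer and return the entry to the mempool.
 */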
static void _cifs_mid_q_entry_release(struct kref *refcount)
{
        struct mid_q_entry *midEntry =
                        container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
        __le16 command = midEntry->server->vals->lock_cmd;
        __u16 smb_cmd = le16_to_cpu(midEntry->command);
        unsigned long now;
        unsigned long roundtrip_time;
#endif
        struct TCP_Server_Info *server = midEntry->server;

        if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
            midEntry->mid_state == MID_RESPONSE_RECEIVED &&
            server->ops->handle_cancelled_mid)
                server->ops->handle_cancelled_mid(midEntry, server);

        midEntry->mid_state = MID_FREE;
        atomic_dec(&midCount);
        if (midEntry->large_buf)
                cifs_buf_release(midEntry->resp_buf);
        else
                cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
        now = jiffies;
        if (now < midEntry->when_alloc)
                cifs_server_dbg(VFS, "Invalid mid allocation time\n");
        roundtrip_time = now - midEntry->when_alloc;

        if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
                if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
                        server->slowest_cmd[smb_cmd] = roundtrip_time;
                        server->fastest_cmd[smb_cmd] = roundtrip_time;
                } else {
                        if (server->slowest_cmd[smb_cmd] < roundtrip_time)
                                server->slowest_cmd[smb_cmd] = roundtrip_time;
                        else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
                                server->fastest_cmd[smb_cmd] = roundtrip_time;
                }
                cifs_stats_inc(&server->num_cmds[smb_cmd]);
                server->time_per_cmd[smb_cmd] += roundtrip_time;
        }
        /*
         * Commands taking longer than one second (default) can be indications
         * that something is wrong, unless it is quite a slow link or a very
         * busy server. Note that this calc is unlikely or impossible to wrap
         * as long as slow_rsp_threshold is not set way above the recommended
         * max value (32767, i.e. 9 hours), and it is generally harmless even
         * if wrong since it only affects debug counters - so leave the calc
         * as a simple comparison rather than doing multiple conversions and
         * overflow checks.
         */
        if ((slow_rsp_threshold != 0) &&
            time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
            (midEntry->command != command)) {
                /*
                 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
                 * NB: le16_to_cpu returns unsigned so can not be negative below
                 */
                if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
                        cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

                trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
                               midEntry->when_sent, midEntry->when_received);
                if (cifsFYI & CIFS_TIMER) {
                        pr_debug("slow rsp: cmd %d mid %llu",
                                 midEntry->command, midEntry->mid);
                        cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
                                  now - midEntry->when_alloc,
                                  now - midEntry->when_sent,
                                  now - midEntry->when_received);
                }
        }
#endif
        put_task_struct(midEntry->creator);

        mempool_free(midEntry, cifs_mid_poolp);
}

void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
        spin_lock(&GlobalMid_Lock);
        kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
        spin_unlock(&GlobalMid_Lock);
}

void DeleteMidQEntry(struct mid_q_entry *midEntry)
{
        cifs_mid_q_entry_release(midEntry);
}

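/*
 * Unlink the mid from the pending queue (unless the demultiplex thread
 * already did so) and drop the queue's reference to it.
 */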
void
cifs_delete_mid(struct mid_q_entry *mid)
{
        spin_lock(&GlobalMid_Lock);
        if (!(mid->mid_flags & MID_DELETED)) {
                list_del_init(&mid->qhead);
                mid->mid_flags |= MID_DELETED;
        }
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:     Server to send the data to
 * @smb_msg:    Message to send
 * @sent:       amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
              size_t *sent)
{
        int rc = 0;
        int retries = 0;
        struct socket *ssocket = server->ssocket;

        *sent = 0;

        if (server->noblocksnd)
                smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
        else
                smb_msg->msg_flags = MSG_NOSIGNAL;

        while (msg_data_left(smb_msg)) {
                /*
                 * If this is a blocking send, we try 3 times, since each can
                 * block for 5 seconds. For a nonblocking send we have to try
                 * more times, but wait increasing amounts of time to allow
                 * the socket to clear. The overall time we wait in either
                 * case to send on the socket is about 15 seconds.
                 * Similarly we wait for 15 seconds for a response from the
                 * server in SendReceive[2] for the server to send a response
                 * back for most types of requests (except SMB writes past the
                 * end of file, which can be slow, and blocking lock
                 * operations). NFS waits slightly longer than CIFS, but this
                 * can make it take longer for nonresponsive servers to be
                 * detected, and 15 seconds is more than enough time for
                 * modern networks to send a packet. In most cases if we fail
                 * to send after the retries we will kill the socket and
                 * reconnect, which may clear the network problem.
                 */
                rc = sock_sendmsg(ssocket, smb_msg);
                if (rc == -EAGAIN) {
                        retries++;
                        if (retries >= 14 ||
                            (!server->noblocksnd && (retries > 2))) {
                                cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
                                         ssocket);
                                return -EAGAIN;
                        }
                        msleep(1 << retries);
                        continue;
                }

                if (rc < 0)
                        return rc;

                if (rc == 0) {
                        /* should never happen, letting socket clear before
                           retrying is our only obvious option here */
                        cifs_server_dbg(VFS, "tcp sent no data\n");
                        msleep(500);
                        continue;
                }

                /* send was at least partially successful */
                *sent += rc;
                retries = 0; /* in case we get ENOSPC on the next send */
        }
        return 0;
}

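/*
 * smb_rqst_len - number of bytes @rqst will occupy on the wire
 * @server: server the request is destined for
 * @rqst:   request to measure
 *
 * For SMB2+ (no header preamble) a 4-byte first iov holds the RFC1002
 * length field rather than payload and is therefore not counted here.
 */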
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        unsigned int i;
        struct kvec *iov;
        int nvec;
        unsigned long buflen = 0;

        if (server->vals->header_preamble_size == 0 &&
            rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
                iov = &rqst->rq_iov[1];
                nvec = rqst->rq_nvec - 1;
        } else {
                iov = rqst->rq_iov;
                nvec = rqst->rq_nvec;
        }

        /* total up iov array first */
        for (i = 0; i < nvec; i++)
                buflen += iov[i].iov_len;

        /*
         * Add in the page array if there is one. The caller needs to make
         * sure rq_offset and rq_tailsz are set correctly. If a buffer of
         * multiple pages ends at page boundary, rq_tailsz needs to be set to
         * PAGE_SIZE.
         */
        if (rqst->rq_npages) {
                if (rqst->rq_npages == 1)
                        buflen += rqst->rq_tailsz;
                else {
                        /*
                         * If there is more than one page, calculate the
                         * buffer length based on rq_offset and rq_tailsz
                         */
                        buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
                                        rqst->rq_offset;
                        buflen += rqst->rq_tailsz;
                }
        }

        return buflen;
}

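/*
 * Core send path. The socket is corked so that the RFC1002 length marker
 * (for SMB2+), the iov arrays and any page data of all @num_rqst requests
 * go out as one stream, and signals are blocked for the duration of the
 * send so an interruption cannot cause a partial send.
 */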
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
                struct smb_rqst *rqst)
{
        int rc;
        struct kvec *iov;
        int n_vec;
        unsigned int send_length = 0;
        unsigned int i, j;
        sigset_t mask, oldmask;
        size_t total_len = 0, sent, size;
        struct socket *ssocket = server->ssocket;
        struct msghdr smb_msg = {};
        __be32 rfc1002_marker;

        cifs_in_send_inc(server);
        if (cifs_rdma_enabled(server)) {
                /* return -EAGAIN when connecting or reconnecting */
                rc = -EAGAIN;
                if (server->smbd_conn)
                        rc = smbd_send(server, num_rqst, rqst);
                goto smbd_done;
        }

        rc = -EAGAIN;
        if (ssocket == NULL)
                goto out;

        rc = -ERESTARTSYS;
        if (fatal_signal_pending(current)) {
                cifs_dbg(FYI, "signal pending before send request\n");
                goto out;
        }

        rc = 0;
        /* cork the socket */
        tcp_sock_set_cork(ssocket->sk, true);

        for (j = 0; j < num_rqst; j++)
                send_length += smb_rqst_len(server, &rqst[j]);
        rfc1002_marker = cpu_to_be32(send_length);

        /*
         * We should not allow signals to interrupt the network send because
         * any partial send will cause session reconnects thus increasing
         * latency of system calls and overload a server with unnecessary
         * requests.
         */

        sigfillset(&mask);
        sigprocmask(SIG_BLOCK, &mask, &oldmask);

        /* Generate an RFC1002 length marker for SMB2+ */
        if (server->vals->header_preamble_size == 0) {
                struct kvec hiov = {
                        .iov_base = &rfc1002_marker,
                        .iov_len  = 4
                };
                iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;
                send_length += 4;
        }

        cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

        for (j = 0; j < num_rqst; j++) {
                iov = rqst[j].rq_iov;
                n_vec = rqst[j].rq_nvec;

                size = 0;
                for (i = 0; i < n_vec; i++) {
                        dump_smb(iov[i].iov_base, iov[i].iov_len);
                        size += iov[i].iov_len;
                }

                iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;

                /* now walk the page array and send each page in it */
                for (i = 0; i < rqst[j].rq_npages; i++) {
                        struct bio_vec bvec;

                        bvec.bv_page = rqst[j].rq_pages[i];
                        rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
                                             &bvec.bv_offset);

                        iov_iter_bvec(&smb_msg.msg_iter, WRITE,
                                      &bvec, 1, bvec.bv_len);
                        rc = smb_send_kvec(server, &smb_msg, &sent);
                        if (rc < 0)
                                break;

                        total_len += sent;
                }
        }

unmask:
        sigprocmask(SIG_SETMASK, &oldmask, NULL);

        /*
         * If a signal is pending but we have already sent the whole packet
         * to the server, we need to return success so that a corresponding
         * mid entry is kept in the pending requests queue, allowing the
         * client to handle the response from the server.
         *
         * If only part of the packet has been sent, there is no need to hide
         * the interrupt because the session will be reconnected anyway, so
         * there won't be any response from the server to handle.
         */

        if (signal_pending(current) && (total_len != send_length)) {
                cifs_dbg(FYI, "signal is pending after attempt to send\n");
                rc = -ERESTARTSYS;
        }

        /* uncork it */
        tcp_sock_set_cork(ssocket->sk, false);

        if ((total_len > 0) && (total_len != send_length)) {
                cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
                         send_length, total_len);
                /*
                 * If we have only sent part of an SMB then the next SMB could
                 * be taken as the remainder of this one. We need to kill the
                 * socket so the server throws away the partial SMB
                 */
                server->tcpStatus = CifsNeedReconnect;
                trace_smb3_partial_send_reconnect(server->CurrentMid,
                                                  server->hostname);
        }
smbd_done:
        if (rc < 0 && rc != -EINTR)
                cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
                         rc);
        else if (rc > 0)
                rc = 0;
out:
        cifs_in_send_dec(server);
        return rc;
}

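/*
 * As __smb_send_rqst(), but when CIFS_TRANSFORM_REQ is set the compound is
 * first wrapped in a transform header and encrypted via the server's
 * init_transform_rq op.
 */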
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
              struct smb_rqst *rqst, int flags)
{
        struct kvec iov;
        struct smb2_transform_hdr *tr_hdr;
        struct smb_rqst cur_rqst[MAX_COMPOUND];
        int rc;

        if (!(flags & CIFS_TRANSFORM_REQ))
                return __smb_send_rqst(server, num_rqst, rqst);

        if (num_rqst > MAX_COMPOUND - 1)
                return -ENOMEM;

        if (!server->ops->init_transform_rq) {
                cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
                return -EIO;
        }

        tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS);
        if (!tr_hdr)
                return -ENOMEM;

        memset(&cur_rqst[0], 0, sizeof(cur_rqst));
        memset(&iov, 0, sizeof(iov));
        memset(tr_hdr, 0, sizeof(*tr_hdr));

        iov.iov_base = tr_hdr;
        iov.iov_len = sizeof(*tr_hdr);
        cur_rqst[0].rq_iov = &iov;
        cur_rqst[0].rq_nvec = 1;

        rc = server->ops->init_transform_rq(server, num_rqst + 1,
                                            &cur_rqst[0], rqst);
        if (rc)
                goto out;

        rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
        smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
out:
        kfree(tr_hdr);
        return rc;
}

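/* Send a single SMB, split into its 4-byte RFC1002 length field and body */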
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
         unsigned int smb_buf_length)
{
        struct kvec iov[2];
        struct smb_rqst rqst = { .rq_iov = iov,
                                 .rq_nvec = 2 };

        iov[0].iov_base = smb_buffer;
        iov[0].iov_len = 4;
        iov[1].iov_base = (char *)smb_buffer + 4;
        iov[1].iov_len = smb_buf_length;

        return __smb_send_rqst(server, 1, &rqst);
}

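/*
 * Block until @num_credits credits are available (or @timeout ms elapse),
 * then charge them and record which reconnect instance they came from.
 * A negative @timeout waits indefinitely; oplock break responses
 * (CIFS_NON_BLOCKING) are never held up waiting for credits.
 */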
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
                      const int timeout, const int flags,
                      unsigned int *instance)
{
        long rc;
        int *credits;
        int optype;
        long int t;

        if (timeout < 0)
                t = MAX_JIFFY_OFFSET;
        else
                t = msecs_to_jiffies(timeout);

        optype = flags & CIFS_OP_MASK;

        *instance = 0;

        credits = server->ops->get_credits_field(server, optype);
        /* Since an echo is already inflight, no need to wait to send another */
        if (*credits <= 0 && optype == CIFS_ECHO_OP)
                return -EAGAIN;

        spin_lock(&server->req_lock);
        if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
                /* oplock breaks must not be held up */
                server->in_flight++;
                if (server->in_flight > server->max_in_flight)
                        server->max_in_flight = server->in_flight;
                *credits -= 1;
                *instance = server->reconnect_instance;
                spin_unlock(&server->req_lock);
                return 0;
        }

        while (1) {
                if (*credits < num_credits) {
                        spin_unlock(&server->req_lock);
                        cifs_num_waiters_inc(server);
                        rc = wait_event_killable_timeout(server->request_q,
                                has_credits(server, credits, num_credits), t);
                        cifs_num_waiters_dec(server);
                        if (!rc) {
                                trace_smb3_credit_timeout(server->CurrentMid,
                                        server->hostname, num_credits, 0);
                                cifs_server_dbg(VFS, "wait timed out after %d ms\n",
                                         timeout);
                                return -ENOTSUPP;
                        }
                        if (rc == -ERESTARTSYS)
                                return -ERESTARTSYS;
                        spin_lock(&server->req_lock);
                } else {
                        if (server->tcpStatus == CifsExiting) {
                                spin_unlock(&server->req_lock);
                                return -ENOENT;
                        }

                        /*
                         * For normal commands, reserve the last MAX_COMPOUND
                         * credits for compound requests.
                         * Otherwise these compounds could be permanently
                         * starved for credits by single-credit requests.
                         *
                         * To prevent spinning CPU, block this thread until
                         * there are >MAX_COMPOUND credits available.
                         * But only do this if we already have a lot of
                         * credits in flight to avoid triggering this check
                         * for servers that are slow to hand out credits on
                         * new sessions.
                         */
                        if (!optype && num_credits == 1 &&
                            server->in_flight > 2 * MAX_COMPOUND &&
                            *credits <= MAX_COMPOUND) {
                                spin_unlock(&server->req_lock);
                                cifs_num_waiters_inc(server);
                                rc = wait_event_killable_timeout(
                                        server->request_q,
                                        has_credits(server, credits,
                                                    MAX_COMPOUND + 1),
                                        t);
                                cifs_num_waiters_dec(server);
                                if (!rc) {
                                        trace_smb3_credit_timeout(
                                                server->CurrentMid,
                                                server->hostname, num_credits,
                                                0);
                                        cifs_server_dbg(VFS, "wait timed out after %d ms\n",
                                                 timeout);
                                        return -ENOTSUPP;
                                }
                                if (rc == -ERESTARTSYS)
                                        return -ERESTARTSYS;
                                spin_lock(&server->req_lock);
                                continue;
                        }

                        /*
                         * Can not count locking commands against total
                         * as they are allowed to block on server.
                         */

                        /* update # of requests on the wire to server */
                        if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
                                *credits -= num_credits;
                                server->in_flight += num_credits;
                                if (server->in_flight > server->max_in_flight)
                                        server->max_in_flight = server->in_flight;
                                *instance = server->reconnect_instance;
                        }
                        spin_unlock(&server->req_lock);
                        break;
                }
        }
        return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
                      unsigned int *instance)
{
        return wait_for_free_credits(server, 1, -1, flags, instance);
}

static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
                          const int flags, unsigned int *instance)
{
        int *credits;

        credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

        spin_lock(&server->req_lock);
        if (*credits < num) {
                /*
                 * If the server is tight on resources or just gives us less
                 * credits for other reasons (e.g. requests are coming out of
                 * order and the server delays granting more credits until it
                 * processes a missing mid) and we exhausted most available
                 * credits there may be situations when we try to send
                 * a compound request but we don't have enough credits. At this
                 * point the client needs to decide if it should wait for
                 * additional credits or fail the request. If at least one
                 * request is in flight there is a high probability that the
                 * server will return enough credits to satisfy this compound
                 * request.
                 *
                 * Return immediately if no requests in flight since we will be
                 * stuck on waiting for credits.
                 */
                if (server->in_flight == 0) {
                        spin_unlock(&server->req_lock);
                        return -ENOTSUPP;
                }
        }
        spin_unlock(&server->req_lock);

        return wait_for_free_credits(server, num, 60000, flags, instance);
}

int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
                      unsigned int *num, struct cifs_credits *credits)
{
        *num = size;
        credits->value = 0;
        credits->instance = server->reconnect_instance;
        return 0;
}

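/*
 * Allocate a mid for @in_buf and queue it on the server's pending_mid_q,
 * refusing if the session or TCP connection is in the wrong state for the
 * command being sent.
 */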
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
                        struct mid_q_entry **ppmidQ)
{
        if (ses->server->tcpStatus == CifsExiting)
                return -ENOENT;

        if (ses->server->tcpStatus == CifsNeedReconnect) {
                cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
                return -EAGAIN;
        }

        if (ses->status == CifsNew) {
                if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
                        (in_buf->Command != SMB_COM_NEGOTIATE))
                        return -EAGAIN;
                /* else ok - we are setting up session */
        }

        if (ses->status == CifsExiting) {
                /* check if SMB session is bad because we are setting it up */
                if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
                        return -EAGAIN;
                /* else ok - we are shutting down session */
        }

        *ppmidQ = AllocMidQEntry(in_buf, ses->server);
        if (*ppmidQ == NULL)
                return -ENOMEM;
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);
        return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
        int error;

        error = wait_event_freezekillable_unsafe(server->response_q,
                                    midQ->mid_state != MID_REQUEST_SUBMITTED);
        if (error < 0)
                return -ERESTARTSYS;

        return 0;
}

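/*
 * Build and sign a mid for an asynchronous request. The caller must pass
 * the RFC1002 length as a separate 4-byte first iov (checked below).
 */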
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        /* enable signing if server requires it */
        if (server->sign)
                hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

        mid = AllocMidQEntry(hdr, server);
        if (mid == NULL)
                return ERR_PTR(-ENOMEM);

        rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
        if (rc) {
                DeleteMidQEntry(mid);
                return ERR_PTR(rc);
        }

        return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. The caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
                mid_receive_t *receive, mid_callback_t *callback,
                mid_handle_t *handle, void *cbdata, const int flags,
                const struct cifs_credits *exist_credits)
{
        int rc;
        struct mid_q_entry *mid;
        struct cifs_credits credits = { .value = 0, .instance = 0 };
        unsigned int instance;
        int optype;

        optype = flags & CIFS_OP_MASK;

        if ((flags & CIFS_HAS_CREDITS) == 0) {
                rc = wait_for_free_request(server, flags, &instance);
                if (rc)
                        return rc;
                credits.value = 1;
                credits.instance = instance;
        } else
                instance = exist_credits->instance;

        mutex_lock(&server->srv_mutex);

        /*
         * We can't use credits obtained from the previous session to send this
         * request. Check if there were reconnects after we obtained credits and
         * return -EAGAIN in such cases to let callers handle it.
         */
        if (instance != server->reconnect_instance) {
                mutex_unlock(&server->srv_mutex);
                add_credits_and_wake_if(server, &credits, optype);
                return -EAGAIN;
        }

        mid = server->ops->setup_async_request(server, rqst);
        if (IS_ERR(mid)) {
                mutex_unlock(&server->srv_mutex);
                add_credits_and_wake_if(server, &credits, optype);
                return PTR_ERR(mid);
        }

        mid->receive = receive;
        mid->callback = callback;
        mid->callback_data = cbdata;
        mid->handle = handle;
        mid->mid_state = MID_REQUEST_SUBMITTED;

        /* put it on the pending_mid_q */
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&mid->qhead, &server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);

        /*
         * Need to store the time in mid before calling I/O. For call_async,
         * I/O response may come back and free the mid entry on another thread.
         */
        cifs_save_when_sent(mid);
        rc = smb_send_rqst(server, 1, rqst, flags);

        if (rc < 0) {
                revert_current_mid(server, mid->credits);
                server->sequence_number -= 2;
                cifs_delete_mid(mid);
        }

        mutex_unlock(&server->srv_mutex);

        if (rc == 0)
                return 0;

        add_credits_and_wake_if(server, &credits, optype);
        return rc;
}

/*
 * Send an SMB Request.  No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
                 char *in_buf, int flags)
{
        int rc;
        struct kvec iov[1];
        struct kvec rsp_iov;
        int resp_buf_type;

        iov[0].iov_base = in_buf;
        iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
        flags |= CIFS_NO_RSP_BUF;
        rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
        cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

        return rc;
}

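/*
 * Map the final state of a mid to an errno, dequeue it if the demultiplex
 * thread has not already done so, and release it.
 */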
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
        int rc = 0;

        cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
                 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

        spin_lock(&GlobalMid_Lock);
        switch (mid->mid_state) {
        case MID_RESPONSE_RECEIVED:
                spin_unlock(&GlobalMid_Lock);
                return rc;
        case MID_RETRY_NEEDED:
                rc = -EAGAIN;
                break;
        case MID_RESPONSE_MALFORMED:
                rc = -EIO;
                break;
        case MID_SHUTDOWN:
                rc = -EHOSTDOWN;
                break;
        default:
                if (!(mid->mid_flags & MID_DELETED)) {
                        list_del_init(&mid->qhead);
                        mid->mid_flags |= MID_DELETED;
                }
                cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
                         __func__, mid->mid, mid->mid_state);
                rc = -EIO;
        }
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
        return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
            struct mid_q_entry *mid)
{
        return server->ops->send_cancel ?
                                server->ops->send_cancel(server, rqst, mid) : 0;
}

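/*
 * Validate a received response: verify its signature if signing is in
 * force, then map any SMB error it carries to a POSIX error code.
 */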
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
                   bool log_error)
{
        unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

        dump_smb(mid->resp_buf, min_t(u32, 92, len));

        /* verify the signature if the server signs responses */
        if (server->sign) {
                struct kvec iov[2];
                int rc = 0;
                struct smb_rqst rqst = { .rq_iov = iov,
                                         .rq_nvec = 2 };

                iov[0].iov_base = mid->resp_buf;
                iov[0].iov_len = 4;
                iov[1].iov_base = (char *)mid->resp_buf + 4;
                iov[1].iov_len = len - 4;
                /* FIXME: add code to kill session */
                rc = cifs_verify_signature(&rqst, server,
                                           mid->sequence_number);
                if (rc)
                        cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
                                 rc);
        }

        /* BB special case reconnect tid and uid here? */
        return map_and_check_smb_error(mid, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
                   struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        rc = allocate_mid(ses, hdr, &mid);
        if (rc)
                return ERR_PTR(rc);
        rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
        if (rc) {
                cifs_delete_mid(mid);
                return ERR_PTR(rc);
        }
        return mid;
}

static void
cifs_compound_callback(struct mid_q_entry *mid)
{
        struct TCP_Server_Info *server = mid->server;
        struct cifs_credits credits;

        credits.value = server->ops->get_credits(mid);
        credits.instance = server->reconnect_instance;

        add_credits(server, &credits, mid->optype);
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        DeleteMidQEntry(mid);
}

/*
 * Return a channel (master if none) of @ses that can be used to send
 * regular requests.
 *
 * If we are currently binding a new channel (negprot/sess.setup),
 * return the new incomplete channel.
 */
struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
{
        uint index = 0;

        if (!ses)
                return NULL;

        if (!ses->binding) {
                /* round robin */
                if (ses->chan_count > 1) {
                        index = (uint)atomic_inc_return(&ses->chan_seq);
                        index %= ses->chan_count;
                }
                return ses->chans[index].server;
        } else {
                return cifs_ses_server(ses);
        }
}

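/*
 * Send @num_rqst requests as a single compound chain and wait for all of
 * the responses. Credits are obtained up front for every part of the
 * chain; the per-mid callbacks return them as responses (or cancellations)
 * arrive.
 */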
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
                   struct TCP_Server_Info *server,
                   const int flags, const int num_rqst, struct smb_rqst *rqst,
                   int *resp_buf_type, struct kvec *resp_iov)
{
        int i, j, optype, rc = 0;
        struct mid_q_entry *midQ[MAX_COMPOUND];
        bool cancelled_mid[MAX_COMPOUND] = {false};
        struct cifs_credits credits[MAX_COMPOUND] = {
                { .value = 0, .instance = 0 }
        };
        unsigned int instance;
        char *buf;

        optype = flags & CIFS_OP_MASK;

        for (i = 0; i < num_rqst; i++)
                resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

        if (!ses || !ses->server || !server) {
                cifs_dbg(VFS, "Null session\n");
                return -EIO;
        }

        if (server->tcpStatus == CifsExiting)
                return -ENOENT;

        /*
         * Wait for all the requests to become available.
         * This approach still leaves the possibility to be stuck waiting for
         * credits if the server doesn't grant credits to the outstanding
         * requests and if the client is completely idle, not generating any
         * other requests.
         * This can be handled by the eventual session reconnect.
         */
        rc = wait_for_compound_request(server, num_rqst, flags,
                                       &instance);
        if (rc)
                return rc;

        for (i = 0; i < num_rqst; i++) {
                credits[i].value = 1;
                credits[i].instance = instance;
        }

        /*
         * Make sure that we sign in the same order that we send on this socket
         * and avoid races inside tcp sendmsg code that could cause corruption
         * of smb data.
         */

        mutex_lock(&server->srv_mutex);

        /*
         * All the parts of the compound chain must use credits obtained from
         * the same session. We can not use credits obtained from the previous
         * session to send this request. Check if there were reconnects after
         * we obtained credits and return -EAGAIN in such cases to let callers
         * handle it.
         */
        if (instance != server->reconnect_instance) {
                mutex_unlock(&server->srv_mutex);
                for (j = 0; j < num_rqst; j++)
                        add_credits(server, &credits[j], optype);
                return -EAGAIN;
        }

        for (i = 0; i < num_rqst; i++) {
                midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
                if (IS_ERR(midQ[i])) {
                        revert_current_mid(server, i);
                        for (j = 0; j < i; j++)
                                cifs_delete_mid(midQ[j]);
                        mutex_unlock(&server->srv_mutex);

                        /* Update # of requests on wire to server */
                        for (j = 0; j < num_rqst; j++)
                                add_credits(server, &credits[j], optype);
                        return PTR_ERR(midQ[i]);
                }

                midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
                midQ[i]->optype = optype;
                /*
                 * Invoke callback for every part of the compound chain
                 * to calculate credits properly. Wake up this thread only when
                 * the last element is received.
                 */
                if (i < num_rqst - 1)
                        midQ[i]->callback = cifs_compound_callback;
                else
                        midQ[i]->callback = cifs_compound_last_callback;
        }
        rc = smb_send_rqst(server, num_rqst, rqst, flags);

        for (i = 0; i < num_rqst; i++)
                cifs_save_when_sent(midQ[i]);

        if (rc < 0) {
                revert_current_mid(server, num_rqst);
                server->sequence_number -= 2;
        }

        mutex_unlock(&server->srv_mutex);

        /*
         * If sending failed for some reason or it is an oplock break that we
         * will not receive a response to - return credits back
         */
        if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
                for (i = 0; i < num_rqst; i++)
                        add_credits(server, &credits[i], optype);
                goto out;
        }

        /*
         * At this point the request is passed to the network stack - we assume
         * that any credits taken from the server structure on the client have
         * been spent and we can't return them back. Once we receive responses
         * we will collect credits granted by the server in the mid callbacks
         * and add those credits to the server structure.
         */

        /*
         * Compounding is never used during session establish.
         */
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
                mutex_lock(&server->srv_mutex);
                smb311_update_preauth_hash(ses, rqst[0].rq_iov,
                                           rqst[0].rq_nvec);
                mutex_unlock(&server->srv_mutex);
        }

        for (i = 0; i < num_rqst; i++) {
                rc = wait_for_response(server, midQ[i]);
                if (rc != 0)
                        break;
        }
        if (rc != 0) {
                for (; i < num_rqst; i++) {
                        cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
                                 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
                        send_cancel(server, &rqst[i], midQ[i]);
                        spin_lock(&GlobalMid_Lock);
                        midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
                        if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
                                midQ[i]->callback = cifs_cancelled_callback;
                                cancelled_mid[i] = true;
                                credits[i].value = 0;
                        }
                        spin_unlock(&GlobalMid_Lock);
                }
        }

        for (i = 0; i < num_rqst; i++) {
                if (rc < 0)
                        goto out;

                rc = cifs_sync_mid_result(midQ[i], server);
                if (rc != 0) {
                        /* mark this mid as cancelled to not free it below */
                        cancelled_mid[i] = true;
                        goto out;
                }

                if (!midQ[i]->resp_buf ||
                    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
                        rc = -EIO;
                        cifs_dbg(FYI, "Bad MID state?\n");
                        goto out;
                }

                buf = (char *)midQ[i]->resp_buf;
                resp_iov[i].iov_base = buf;
                resp_iov[i].iov_len = midQ[i]->resp_buf_size +
                        server->vals->header_preamble_size;

                if (midQ[i]->large_buf)
                        resp_buf_type[i] = CIFS_LARGE_BUFFER;
                else
                        resp_buf_type[i] = CIFS_SMALL_BUFFER;

                rc = server->ops->check_receive(midQ[i], server,
                                                     flags & CIFS_LOG_ERROR);

                /* mark it so buf will not be freed by cifs_delete_mid */
                if ((flags & CIFS_NO_RSP_BUF) == 0)
                        midQ[i]->resp_buf = NULL;

        }

        /*
         * Compounding is never used during session establish.
         */
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
                struct kvec iov = {
                        .iov_base = resp_iov[0].iov_base,
                        .iov_len = resp_iov[0].iov_len
                };
                mutex_lock(&server->srv_mutex);
                smb311_update_preauth_hash(ses, &iov, 1);
                mutex_unlock(&server->srv_mutex);
        }

out:
        /*
         * This will dequeue all mids. After this it is important that the
         * demultiplex_thread will not process any of these mids any further.
         * This is prevented above by using a noop callback that will not
         * wake this thread except for the very last PDU.
         */
        for (i = 0; i < num_rqst; i++) {
                if (!cancelled_mid[i])
                        cifs_delete_mid(midQ[i]);
        }

        return rc;
}

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
               struct TCP_Server_Info *server,
               struct smb_rqst *rqst, int *resp_buf_type, const int flags,
               struct kvec *resp_iov)
{
        return compound_send_recv(xid, ses, server, flags, 1,
                                  rqst, resp_buf_type, resp_iov);
}

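/*
 * Wrapper around cifs_send_recv() that splits off the 4-byte RFC1001
 * length from the first buffer into its own iov, as the transport expects,
 * before sending.
 */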
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
             struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
             const int flags, struct kvec *resp_iov)
{
        struct smb_rqst rqst;
        struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
        int rc;

        if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
                new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
                                        GFP_KERNEL);
                if (!new_iov) {
                        /* otherwise cifs_send_recv below sets resp_buf_type */
                        *resp_buf_type = CIFS_NO_BUFFER;
                        return -ENOMEM;
                }
        } else
                new_iov = s_iov;

        /* 1st iov is an RFC1001 length followed by the rest of the packet */
        memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

        new_iov[0].iov_base = new_iov[1].iov_base;
        new_iov[0].iov_len = 4;
        new_iov[1].iov_base += 4;
        new_iov[1].iov_len -= 4;

        memset(&rqst, 0, sizeof(struct smb_rqst));
        rqst.rq_iov = new_iov;
        rqst.rq_nvec = n_vec + 1;

        rc = cifs_send_recv(xid, ses, ses->server,
                            &rqst, resp_buf_type, flags, resp_iov);
        if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
                kfree(new_iov);
        return rc;
}

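/*
 * Synchronously send a single SMB1 request and copy the complete response
 * into @out_buf. Consumes one credit and blocks until the response arrives
 * or the wait is cancelled.
 */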
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
            struct smb_hdr *in_buf, struct smb_hdr *out_buf,
            int *pbytes_returned, const int flags)
{
        int rc = 0;
        struct mid_q_entry *midQ;
        unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
        struct kvec iov = { .iov_base = in_buf, .iov_len = len };
        struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
        struct cifs_credits credits = { .value = 1, .instance = 0 };
        struct TCP_Server_Info *server;

        if (ses == NULL) {
                cifs_dbg(VFS, "Null smb session\n");
                return -EIO;
        }
        server = ses->server;
        if (server == NULL) {
                cifs_dbg(VFS, "Null tcp session\n");
                return -EIO;
        }

        if (server->tcpStatus == CifsExiting)
                return -ENOENT;

        /* Ensure that we do not send more than 50 overlapping requests
           to the same server. We may make this configurable later or
           use ses->maxReq */

        if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
                cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
                                len);
                return -EIO;
        }

        rc = wait_for_free_request(server, flags, &credits.instance);
        if (rc)
                return rc;

        /* make sure that we sign in the same order that we send on this socket
           and avoid races inside tcp sendmsg code that could cause corruption
           of smb data */

        mutex_lock(&server->srv_mutex);

        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
                mutex_unlock(&server->srv_mutex);
                /* Update # of requests on wire to server */
                add_credits(server, &credits, 0);
                return rc;
        }

        rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
        if (rc) {
                mutex_unlock(&server->srv_mutex);
                goto out;
        }

        midQ->mid_state = MID_REQUEST_SUBMITTED;

        rc = smb_send(server, in_buf, len);
        cifs_save_when_sent(midQ);

        if (rc < 0)
                server->sequence_number -= 2;

        mutex_unlock(&server->srv_mutex);

        if (rc < 0)
                goto out;

        rc = wait_for_response(server, midQ);
        if (rc != 0) {
                send_cancel(server, &rqst, midQ);
                spin_lock(&GlobalMid_Lock);
                if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
                        /* no longer considered to be "in-flight" */
                        midQ->callback = DeleteMidQEntry;
                        spin_unlock(&GlobalMid_Lock);
                        add_credits(server, &credits, 0);
                        return rc;
                }
                spin_unlock(&GlobalMid_Lock);
        }

        rc = cifs_sync_mid_result(midQ, server);
        if (rc != 0) {
                add_credits(server, &credits, 0);
                return rc;
        }

        if (!midQ->resp_buf || !out_buf ||
            midQ->mid_state != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cifs_server_dbg(VFS, "Bad MID state?\n");
                goto out;
        }

        *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
        memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
        rc = cifs_check_receive(midQ, server, 0);
out:
        cifs_delete_mid(midQ);
        add_credits(server, &credits, 0);

        return rc;
}

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
                        struct smb_hdr *in_buf,
                        struct smb_hdr *out_buf)
{
        int bytes_returned;
        struct cifs_ses *ses = tcon->ses;
        LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

        /* We just modify the current in_buf to change
           the type of lock from LOCKING_ANDX_SHARED_LOCK
           or LOCKING_ANDX_EXCLUSIVE_LOCK to
           LOCKING_ANDX_CANCEL_LOCK. */

        pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK | LOCKING_ANDX_LARGE_FILES;
        pSMB->Timeout = 0;
        pSMB->hdr.Mid = get_next_mid(ses->server);

        return SendReceive(xid, ses, in_buf, out_buf,
                        &bytes_returned, 0);
}

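/*
 * As SendReceive(), but for blocking lock requests, which may legitimately
 * wait on the server for a long time: the wait is interruptible, and on a
 * signal the lock is cancelled (via send_cancel() for POSIX locks carried
 * in SMB_COM_TRANSACTION2, via LOCKINGX_CANCEL_LOCK otherwise) before
 * waiting again for the server's reply.
 */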
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
            struct smb_hdr *in_buf, struct smb_hdr *out_buf,
            int *pbytes_returned)
{
        int rc = 0;
        int rstart = 0;
        struct mid_q_entry *midQ;
        struct cifs_ses *ses;
        unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
        struct kvec iov = { .iov_base = in_buf, .iov_len = len };
        struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
        unsigned int instance;
        struct TCP_Server_Info *server;

        if (tcon == NULL || tcon->ses == NULL) {
                cifs_dbg(VFS, "Null smb session\n");
                return -EIO;
        }
        ses = tcon->ses;
        server = ses->server;

        if (server == NULL) {
                cifs_dbg(VFS, "Null tcp session\n");
                return -EIO;
        }

        if (server->tcpStatus == CifsExiting)
                return -ENOENT;

        /* Ensure that we do not send more than 50 overlapping requests
           to the same server. We may make this configurable later or
           use ses->maxReq */

        if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
                cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
                              len);
                return -EIO;
        }

        rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
        if (rc)
                return rc;

        /* make sure that we sign in the same order that we send on this socket
           and avoid races inside tcp sendmsg code that could cause corruption
           of smb data */

        mutex_lock(&server->srv_mutex);

        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
                mutex_unlock(&server->srv_mutex);
                return rc;
        }

        rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
        if (rc) {
                cifs_delete_mid(midQ);
                mutex_unlock(&server->srv_mutex);
                return rc;
        }

        midQ->mid_state = MID_REQUEST_SUBMITTED;
        rc = smb_send(server, in_buf, len);
        cifs_save_when_sent(midQ);

        if (rc < 0)
                server->sequence_number -= 2;

        mutex_unlock(&server->srv_mutex);

        if (rc < 0) {
                cifs_delete_mid(midQ);
                return rc;
        }

        /* Wait for a reply - allow signals to interrupt. */
        rc = wait_event_interruptible(server->response_q,
                (midQ->mid_state != MID_REQUEST_SUBMITTED) ||
                ((server->tcpStatus != CifsGood) &&
                 (server->tcpStatus != CifsNew)));

        /* Were we interrupted by a signal ? */
        if ((rc == -ERESTARTSYS) &&
                (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
                ((server->tcpStatus == CifsGood) ||
                 (server->tcpStatus == CifsNew))) {

                if (in_buf->Command == SMB_COM_TRANSACTION2) {
                        /* POSIX lock. We send a NT_CANCEL SMB to cause the
                           blocking lock to return. */
                        rc = send_cancel(server, &rqst, midQ);
                        if (rc) {
                                cifs_delete_mid(midQ);
                                return rc;
                        }
                } else {
                        /* Windows lock. We send a LOCKINGX_CANCEL_LOCK
                           to cause the blocking lock to return. */

                        rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

                        /* If we get -ENOLCK back the lock may have
                           already been removed. Don't exit in this case. */
                        if (rc && rc != -ENOLCK) {
                                cifs_delete_mid(midQ);
                                return rc;
                        }
                }

                rc = wait_for_response(server, midQ);
                if (rc) {
                        send_cancel(server, &rqst, midQ);
                        spin_lock(&GlobalMid_Lock);
                        if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
                                /* no longer considered to be "in-flight" */
                                midQ->callback = DeleteMidQEntry;
                                spin_unlock(&GlobalMid_Lock);
                                return rc;
                        }
                        spin_unlock(&GlobalMid_Lock);
                }

                /* We got the response - restart system call. */
                rstart = 1;
        }

        rc = cifs_sync_mid_result(midQ, server);
        if (rc != 0)
                return rc;

        /* rcvd frame is ok */
        if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cifs_tcon_dbg(VFS, "Bad MID state?\n");
                goto out;
        }

        *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
        memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
        rc = cifs_check_receive(midQ, server, 0);
out:
        cifs_delete_mid(midQ);
        if (rstart && rc == -EACCES)
                return -ERESTARTSYS;
        return rc;
}