/*
 *   fs/cifs/transport.c
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

void
cifs_wake_up_task(struct mid_q_entry *mid)
{
        wake_up_process(mid->callback_data);
}

struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
        struct mid_q_entry *temp;

        if (server == NULL) {
                cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
                return NULL;
        }

        temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
        memset(temp, 0, sizeof(struct mid_q_entry));
        kref_init(&temp->refcount);
        temp->mid = get_mid(smb_buffer);
        temp->pid = current->pid;
        temp->command = cpu_to_le16(smb_buffer->Command);
        cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
        /*      do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
        /* when mid allocated can be before when sent */
        temp->when_alloc = jiffies;
        temp->server = server;

        /*
         * The default is for the mid to be synchronous, so the
         * default callback just wakes up the current task.
         */
        get_task_struct(current);
        temp->creator = current;
        temp->callback = cifs_wake_up_task;
        temp->callback_data = current;

        atomic_inc(&midCount);
        temp->mid_state = MID_REQUEST_ALLOCATED;
        return temp;
}

static void _cifs_mid_q_entry_release(struct kref *refcount)
{
        struct mid_q_entry *midEntry =
                        container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
        __le16 command = midEntry->server->vals->lock_cmd;
        __u16 smb_cmd = le16_to_cpu(midEntry->command);
        unsigned long now;
        unsigned long roundtrip_time;
#endif
        struct TCP_Server_Info *server = midEntry->server;

        if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
            midEntry->mid_state == MID_RESPONSE_RECEIVED &&
            server->ops->handle_cancelled_mid)
                server->ops->handle_cancelled_mid(midEntry, server);

        midEntry->mid_state = MID_FREE;
        atomic_dec(&midCount);
        if (midEntry->large_buf)
                cifs_buf_release(midEntry->resp_buf);
        else
                cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
        now = jiffies;
        if (now < midEntry->when_alloc)
                cifs_server_dbg(VFS, "Invalid mid allocation time\n");
        roundtrip_time = now - midEntry->when_alloc;

        if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
                if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
                        server->slowest_cmd[smb_cmd] = roundtrip_time;
                        server->fastest_cmd[smb_cmd] = roundtrip_time;
                } else {
                        if (server->slowest_cmd[smb_cmd] < roundtrip_time)
                                server->slowest_cmd[smb_cmd] = roundtrip_time;
                        else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
                                server->fastest_cmd[smb_cmd] = roundtrip_time;
                }
                cifs_stats_inc(&server->num_cmds[smb_cmd]);
                server->time_per_cmd[smb_cmd] += roundtrip_time;
        }
        /*
         * Commands taking longer than one second (default) can be indications
         * that something is wrong, unless it is quite a slow link or a very
         * busy server. Note that this calc is unlikely or impossible to wrap
         * as long as slow_rsp_threshold is not set way above the recommended
         * max value (32767, i.e. about 9 hours), and it is generally harmless
         * even if wrong since it only affects debug counters - so we leave
         * the calc as a simple comparison rather than doing multiple
         * conversions and overflow checks.
         */
        if ((slow_rsp_threshold != 0) &&
            time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
            (midEntry->command != command)) {
                /*
                 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
                 * NB: le16_to_cpu returns unsigned so can not be negative below
                 */
                if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
                        cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

                trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
                               midEntry->when_sent, midEntry->when_received);
                if (cifsFYI & CIFS_TIMER) {
                        pr_debug("slow rsp: cmd %d mid %llu",
                                 midEntry->command, midEntry->mid);
                        cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
                                  now - midEntry->when_alloc,
                                  now - midEntry->when_sent,
                                  now - midEntry->when_received);
                }
        }
#endif
        put_task_struct(midEntry->creator);

        mempool_free(midEntry, cifs_mid_poolp);
}

void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
        spin_lock(&GlobalMid_Lock);
        kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
        spin_unlock(&GlobalMid_Lock);
}

void DeleteMidQEntry(struct mid_q_entry *midEntry)
{
        cifs_mid_q_entry_release(midEntry);
}

void
cifs_delete_mid(struct mid_q_entry *mid)
{
        spin_lock(&GlobalMid_Lock);
        if (!(mid->mid_flags & MID_DELETED)) {
                list_del_init(&mid->qhead);
                mid->mid_flags |= MID_DELETED;
        }
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
}
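
/*
 * Mid lifecycle sketch (illustration only, not part of the driver): a mid
 * allocated above moves roughly through
 *
 *      AllocMidQEntry()             MID_REQUEST_ALLOCATED, refcount 1
 *      list_add_tail()              queued on server->pending_mid_q
 *      (submit)                     MID_REQUEST_SUBMITTED
 *      (response or cancel)         callback runs, mid_state updated
 *      cifs_delete_mid()            dequeued, MID_DELETED, reference dropped
 *      _cifs_mid_q_entry_release()  MID_FREE, returned to cifs_mid_poolp
 */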

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:     Server to send the data to
 * @smb_msg:    Message to send
 * @sent:       amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
              size_t *sent)
{
        int rc = 0;
        int retries = 0;
        struct socket *ssocket = server->ssocket;

        *sent = 0;

        if (server->noblocksnd)
                smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
        else
                smb_msg->msg_flags = MSG_NOSIGNAL;

        while (msg_data_left(smb_msg)) {
                /*
                 * If blocking send, we try 3 times, since each can block
                 * for 5 seconds. For nonblocking we have to try more
                 * but wait increasing amounts of time allowing time for
                 * socket to clear.  The overall time we wait in either
                 * case to send on the socket is about 15 seconds.
                 * Similarly we wait for 15 seconds for a response from
                 * the server in SendReceive[2] for the server to send
                 * a response back for most types of requests (except
                 * SMB Write past end of file which can be slow, and
                 * blocking lock operations). NFS waits slightly longer
                 * than CIFS, but this can make it take longer for
                 * nonresponsive servers to be detected and 15 seconds
                 * is more than enough time for modern networks to
                 * send a packet.  In most cases if we fail to send
                 * after the retries we will kill the socket and
                 * reconnect which may clear the network problem.
                 */
                rc = sock_sendmsg(ssocket, smb_msg);
                if (rc == -EAGAIN) {
                        retries++;
                        if (retries >= 14 ||
                            (!server->noblocksnd && (retries > 2))) {
                                cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
                                         ssocket);
                                return -EAGAIN;
                        }
                        msleep(1 << retries);
                        continue;
                }

                if (rc < 0)
                        return rc;

                if (rc == 0) {
                        /* should never happen, letting socket clear before
                           retrying is our only obvious option here */
                        cifs_server_dbg(VFS, "tcp sent no data\n");
                        msleep(500);
                        continue;
                }

                /* send was at least partially successful */
                *sent += rc;
                retries = 0; /* in case we get ENOSPC on the next send */
        }
        return 0;
}
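
/*
 * Illustrative sketch (not part of the driver): the -EAGAIN path above
 * backs off exponentially via msleep(1 << retries), so for nonblocking
 * sends the accumulated wait before giving up is roughly
 *
 *      2 + 4 + 8 + ... + 8192 ms  (retries 1..13)  ~= 16 seconds
 *
 * which is where the "stuck for 15 seconds" message comes from. A
 * hypothetical stand-alone model of the same policy:
 *
 *      int retries = 0;
 *      while (send_would_block()) {    // stand-in for sock_sendmsg() == -EAGAIN
 *              if (++retries >= 14)
 *                      return -EAGAIN; // give up; caller kills the socket
 *              msleep(1 << retries);   // 2ms, 4ms, 8ms, ...
 *      }
 *
 * send_would_block() is a made-up helper used only for this sketch.
 */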

unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        unsigned int i;
        struct kvec *iov;
        int nvec;
        unsigned long buflen = 0;

        if (server->vals->header_preamble_size == 0 &&
            rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
                iov = &rqst->rq_iov[1];
                nvec = rqst->rq_nvec - 1;
        } else {
                iov = rqst->rq_iov;
                nvec = rqst->rq_nvec;
        }

        /* total up iov array first */
        for (i = 0; i < nvec; i++)
                buflen += iov[i].iov_len;

        /*
         * Add in the page array if there is one. The caller needs to make
         * sure rq_offset and rq_tailsz are set correctly. If a buffer of
         * multiple pages ends at page boundary, rq_tailsz needs to be set to
         * PAGE_SIZE.
         */
        if (rqst->rq_npages) {
                if (rqst->rq_npages == 1)
                        buflen += rqst->rq_tailsz;
                else {
                        /*
                         * If there is more than one page, calculate the
                         * buffer length based on rq_offset and rq_tailsz
                         */
                        buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
                                        rqst->rq_offset;
                        buflen += rqst->rq_tailsz;
                }
        }

        return buflen;
}
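
/*
 * Worked example (illustrative only): a page array with rq_npages = 3,
 * rq_pagesz = 4096, rq_offset = 100 and rq_tailsz = 512 contributes
 *
 *      4096 * (3 - 1) - 100 + 512 = 8604 bytes
 *
 * i.e. 3996 bytes from the first page (which starts at offset 100),
 * one full middle page, and 512 bytes from the tail page.
 */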

static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
                struct smb_rqst *rqst)
{
        int rc = 0;
        struct kvec *iov;
        int n_vec;
        unsigned int send_length = 0;
        unsigned int i, j;
        sigset_t mask, oldmask;
        size_t total_len = 0, sent, size;
        struct socket *ssocket = server->ssocket;
        struct msghdr smb_msg = {};
        __be32 rfc1002_marker;

        if (cifs_rdma_enabled(server)) {
                /* return -EAGAIN when connecting or reconnecting */
                rc = -EAGAIN;
                if (server->smbd_conn)
                        rc = smbd_send(server, num_rqst, rqst);
                goto smbd_done;
        }

        if (ssocket == NULL)
                return -EAGAIN;

        if (fatal_signal_pending(current)) {
                cifs_dbg(FYI, "signal pending before send request\n");
                return -ERESTARTSYS;
        }

        /* cork the socket */
        tcp_sock_set_cork(ssocket->sk, true);

        for (j = 0; j < num_rqst; j++)
                send_length += smb_rqst_len(server, &rqst[j]);
        rfc1002_marker = cpu_to_be32(send_length);

        /*
         * We should not allow signals to interrupt the network send because
         * any partial send will cause session reconnects thus increasing
         * latency of system calls and overload a server with unnecessary
         * requests.
         */

        sigfillset(&mask);
        sigprocmask(SIG_BLOCK, &mask, &oldmask);

        /* Generate an RFC1002 length marker for SMB2+ */
        if (server->vals->header_preamble_size == 0) {
                struct kvec hiov = {
                        .iov_base = &rfc1002_marker,
                        .iov_len  = 4
                };
                iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;
                send_length += 4;
        }

        cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

        for (j = 0; j < num_rqst; j++) {
                iov = rqst[j].rq_iov;
                n_vec = rqst[j].rq_nvec;

                size = 0;
                for (i = 0; i < n_vec; i++) {
                        dump_smb(iov[i].iov_base, iov[i].iov_len);
                        size += iov[i].iov_len;
                }

                iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;

                /* now walk the page array and send each page in it */
                for (i = 0; i < rqst[j].rq_npages; i++) {
                        struct bio_vec bvec;

                        bvec.bv_page = rqst[j].rq_pages[i];
                        rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
                                             &bvec.bv_offset);

                        iov_iter_bvec(&smb_msg.msg_iter, WRITE,
                                      &bvec, 1, bvec.bv_len);
                        rc = smb_send_kvec(server, &smb_msg, &sent);
                        if (rc < 0)
                                break;

                        total_len += sent;
                }
        }

unmask:
        sigprocmask(SIG_SETMASK, &oldmask, NULL);

        /*
         * If a signal is pending but we have already sent the whole packet
         * to the server, we need to return success status to allow a
         * corresponding mid entry to be kept in the pending requests queue,
         * thus allowing the client to handle responses from the server.
         *
         * If only part of the packet has been sent, there is no need to hide
         * the interrupt because the session will be reconnected anyway, so
         * there won't be any response from the server to handle.
         */

        if (signal_pending(current) && (total_len != send_length)) {
                cifs_dbg(FYI, "signal is pending after attempt to send\n");
                rc = -ERESTARTSYS;
        }

        /* uncork it */
        tcp_sock_set_cork(ssocket->sk, false);

        if ((total_len > 0) && (total_len != send_length)) {
                cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
                         send_length, total_len);
                /*
                 * If we have only sent part of an SMB then the next SMB could
                 * be taken as the remainder of this one. We need to kill the
                 * socket so the server throws away the partial SMB
                 */
                server->tcpStatus = CifsNeedReconnect;
                trace_smb3_partial_send_reconnect(server->CurrentMid,
                                                  server->hostname);
        }
smbd_done:
        if (rc < 0 && rc != -EINTR)
                cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
                         rc);
        else if (rc > 0)
                rc = 0;

        return rc;
}
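
/*
 * Framing note (illustrative): for SMB2+ (header_preamble_size == 0) the
 * function above prepends a 4-byte big-endian RFC1002 length marker before
 * the request body, e.g. a 72-byte request goes on the wire as
 *
 *      00 00 00 48  <72 bytes of SMB2 header + payload>
 *
 * A partial send of that stream is unrecoverable mid-frame, which is why
 * the session is marked CifsNeedReconnect instead of retrying.
 */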

static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
              struct smb_rqst *rqst, int flags)
{
        struct kvec iov;
        struct smb2_transform_hdr *tr_hdr;
        struct smb_rqst cur_rqst[MAX_COMPOUND];
        int rc;

        if (!(flags & CIFS_TRANSFORM_REQ))
                return __smb_send_rqst(server, num_rqst, rqst);

        if (num_rqst > MAX_COMPOUND - 1)
                return -ENOMEM;

        if (!server->ops->init_transform_rq) {
                cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
                return -EIO;
        }

        tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS);
        if (!tr_hdr)
                return -ENOMEM;

        memset(&cur_rqst[0], 0, sizeof(cur_rqst));
        memset(&iov, 0, sizeof(iov));
        memset(tr_hdr, 0, sizeof(*tr_hdr));

        iov.iov_base = tr_hdr;
        iov.iov_len = sizeof(*tr_hdr);
        cur_rqst[0].rq_iov = &iov;
        cur_rqst[0].rq_nvec = 1;

        rc = server->ops->init_transform_rq(server, num_rqst + 1,
                                            &cur_rqst[0], rqst);
        if (rc)
                goto out;

        rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
        smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
out:
        kfree(tr_hdr);
        return rc;
}
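
/*
 * Layout sketch (illustrative): for an encrypted send the caller's
 * requests are shifted down one slot so that a transform header can
 * become request 0:
 *
 *      cur_rqst[0] -> smb2_transform_hdr   (allocated above)
 *      cur_rqst[1] -> transformed rqst[0]
 *      cur_rqst[2] -> transformed rqst[1]
 *      ...
 *
 * which is why at most MAX_COMPOUND - 1 caller requests are accepted
 * when CIFS_TRANSFORM_REQ is set.
 */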

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
         unsigned int smb_buf_length)
{
        struct kvec iov[2];
        struct smb_rqst rqst = { .rq_iov = iov,
                                 .rq_nvec = 2 };

        iov[0].iov_base = smb_buffer;
        iov[0].iov_len = 4;
        iov[1].iov_base = (char *)smb_buffer + 4;
        iov[1].iov_len = smb_buf_length;

        return __smb_send_rqst(server, 1, &rqst);
}

static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
                      const int timeout, const int flags,
                      unsigned int *instance)
{
        long rc;
        int *credits;
        int optype;
        long int t;

        if (timeout < 0)
                t = MAX_JIFFY_OFFSET;
        else
                t = msecs_to_jiffies(timeout);

        optype = flags & CIFS_OP_MASK;

        *instance = 0;

        credits = server->ops->get_credits_field(server, optype);
        /* Since an echo is already inflight, no need to wait to send another */
        if (*credits <= 0 && optype == CIFS_ECHO_OP)
                return -EAGAIN;

        spin_lock(&server->req_lock);
        if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
                /* oplock breaks must not be held up */
                server->in_flight++;
                if (server->in_flight > server->max_in_flight)
                        server->max_in_flight = server->in_flight;
                *credits -= 1;
                *instance = server->reconnect_instance;
                spin_unlock(&server->req_lock);
                return 0;
        }

        while (1) {
                if (*credits < num_credits) {
                        spin_unlock(&server->req_lock);
                        cifs_num_waiters_inc(server);
                        rc = wait_event_killable_timeout(server->request_q,
                                has_credits(server, credits, num_credits), t);
                        cifs_num_waiters_dec(server);
                        if (!rc) {
                                trace_smb3_credit_timeout(server->CurrentMid,
                                        server->hostname, num_credits, 0);
                                cifs_server_dbg(VFS, "wait timed out after %d ms\n",
                                         timeout);
                                return -ENOTSUPP;
                        }
                        if (rc == -ERESTARTSYS)
                                return -ERESTARTSYS;
                        spin_lock(&server->req_lock);
                } else {
                        if (server->tcpStatus == CifsExiting) {
                                spin_unlock(&server->req_lock);
                                return -ENOENT;
                        }

                        /*
                         * For normal commands, reserve the last MAX_COMPOUND
                         * credits for compound requests. Otherwise compounds
                         * could be permanently starved for credits by
                         * single-credit requests.
                         *
                         * To avoid spinning the CPU, block this thread until
                         * there are >MAX_COMPOUND credits available. But only
                         * do this if we already have a lot of requests in
                         * flight, to avoid triggering this check for servers
                         * that are slow to hand out credits on new sessions.
                         */
                        if (!optype && num_credits == 1 &&
                            server->in_flight > 2 * MAX_COMPOUND &&
                            *credits <= MAX_COMPOUND) {
                                spin_unlock(&server->req_lock);
                                cifs_num_waiters_inc(server);
                                rc = wait_event_killable_timeout(
                                        server->request_q,
                                        has_credits(server, credits,
                                                    MAX_COMPOUND + 1),
                                        t);
                                cifs_num_waiters_dec(server);
                                if (!rc) {
                                        trace_smb3_credit_timeout(
                                                server->CurrentMid,
                                                server->hostname, num_credits,
                                                0);
                                        cifs_server_dbg(VFS, "wait timed out after %d ms\n",
                                                 timeout);
                                        return -ENOTSUPP;
                                }
                                if (rc == -ERESTARTSYS)
                                        return -ERESTARTSYS;
                                spin_lock(&server->req_lock);
                                continue;
                        }

                        /*
                         * Cannot count locking commands against the total,
                         * as they are allowed to block on the server.
                         */

                        /* update # of requests on the wire to server */
                        if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
                                *credits -= num_credits;
                                server->in_flight += num_credits;
                                if (server->in_flight > server->max_in_flight)
                                        server->max_in_flight = server->in_flight;
                                *instance = server->reconnect_instance;
                        }
                        spin_unlock(&server->req_lock);
                        break;
                }
        }
        return 0;
}
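
/*
 * Illustrative numbers for the reservation above (MAX_COMPOUND is 5 in
 * cifsglob.h at the time of writing): a single-credit request that finds
 * in_flight > 10 and *credits <= 5 blocks until more than 5 credits are
 * available, keeping the last few credits free for compound requests.
 */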

static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
                      unsigned int *instance)
{
        return wait_for_free_credits(server, 1, -1, flags,
                                     instance);
}

static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
                          const int flags, unsigned int *instance)
{
        int *credits;

        credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

        spin_lock(&server->req_lock);
        if (*credits < num) {
                /*
                 * If the server is tight on resources or just gives us fewer
                 * credits for other reasons (e.g. requests are coming out of
                 * order and the server delays granting more credits until it
                 * processes a missing mid) and we have exhausted most of the
                 * available credits, there may be situations when we try to
                 * send a compound request but don't have enough credits. At
                 * this point the client needs to decide if it should wait for
                 * additional credits or fail the request. If at least one
                 * request is in flight there is a high probability that the
                 * server will return enough credits to satisfy this compound
                 * request.
                 *
                 * Return immediately if no requests are in flight, since we
                 * would otherwise be stuck waiting for credits.
                 */
                if (server->in_flight == 0) {
                        spin_unlock(&server->req_lock);
                        return -ENOTSUPP;
                }
        }
        spin_unlock(&server->req_lock);

        return wait_for_free_credits(server, num, 60000, flags,
                                     instance);
}

int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
                      unsigned int *num, struct cifs_credits *credits)
{
        *num = size;
        credits->value = 0;
        credits->instance = server->reconnect_instance;
        return 0;
}

static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
                        struct mid_q_entry **ppmidQ)
{
        if (ses->server->tcpStatus == CifsExiting) {
                return -ENOENT;
        }

        if (ses->server->tcpStatus == CifsNeedReconnect) {
                cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
                return -EAGAIN;
        }

        if (ses->status == CifsNew) {
                if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
                        (in_buf->Command != SMB_COM_NEGOTIATE))
                        return -EAGAIN;
                /* else ok - we are setting up session */
        }

        if (ses->status == CifsExiting) {
                /* check if SMB session is bad because we are setting it up */
                if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
                        return -EAGAIN;
                /* else ok - we are shutting down session */
        }

        *ppmidQ = AllocMidQEntry(in_buf, ses->server);
        if (*ppmidQ == NULL)
                return -ENOMEM;
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);
        return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
        int error;

        error = wait_event_freezekillable_unsafe(server->response_q,
                                    midQ->mid_state != MID_REQUEST_SUBMITTED);
        if (error < 0)
                return -ERESTARTSYS;

        return 0;
}

struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        /* enable signing if server requires it */
        if (server->sign)
                hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

        mid = AllocMidQEntry(hdr, server);
        if (mid == NULL)
                return ERR_PTR(-ENOMEM);

        rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
        if (rc) {
                DeleteMidQEntry(mid);
                return ERR_PTR(rc);
        }

        return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. The caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
                mid_receive_t *receive, mid_callback_t *callback,
                mid_handle_t *handle, void *cbdata, const int flags,
                const struct cifs_credits *exist_credits)
{
        int rc;
        struct mid_q_entry *mid;
        struct cifs_credits credits = { .value = 0, .instance = 0 };
        unsigned int instance;
        int optype;

        optype = flags & CIFS_OP_MASK;

        if ((flags & CIFS_HAS_CREDITS) == 0) {
                rc = wait_for_free_request(server, flags, &instance);
                if (rc)
                        return rc;
                credits.value = 1;
                credits.instance = instance;
        } else
                instance = exist_credits->instance;

        mutex_lock(&server->srv_mutex);

        /*
         * We can't use credits obtained from the previous session to send this
         * request. Check if there were reconnects after we obtained credits and
         * return -EAGAIN in such cases to let callers handle it.
         */
        if (instance != server->reconnect_instance) {
                mutex_unlock(&server->srv_mutex);
                add_credits_and_wake_if(server, &credits, optype);
                return -EAGAIN;
        }

        mid = server->ops->setup_async_request(server, rqst);
        if (IS_ERR(mid)) {
                mutex_unlock(&server->srv_mutex);
                add_credits_and_wake_if(server, &credits, optype);
                return PTR_ERR(mid);
        }

        mid->receive = receive;
        mid->callback = callback;
        mid->callback_data = cbdata;
        mid->handle = handle;
        mid->mid_state = MID_REQUEST_SUBMITTED;

        /* put it on the pending_mid_q */
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&mid->qhead, &server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);

        /*
         * Need to store the time in mid before calling I/O. For call_async,
         * I/O response may come back and free the mid entry on another thread.
         */
        cifs_save_when_sent(mid);
        cifs_in_send_inc(server);
        rc = smb_send_rqst(server, 1, rqst, flags);
        cifs_in_send_dec(server);

        if (rc < 0) {
                revert_current_mid(server, mid->credits);
                server->sequence_number -= 2;
                cifs_delete_mid(mid);
        }

        mutex_unlock(&server->srv_mutex);

        if (rc == 0)
                return 0;

        add_credits_and_wake_if(server, &credits, optype);
        return rc;
}
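
/*
 * Usage sketch (hypothetical caller, illustration only): a fire-and-forget
 * request whose callback releases the mid itself, roughly the shape of the
 * echo path (see cifs_echo_callback() in cifssmb.c for a real in-tree user):
 *
 *      static void my_callback(struct mid_q_entry *mid)
 *      {
 *              // inspect mid->mid_state / mid->resp_buf here, then:
 *              DeleteMidQEntry(mid);
 *      }
 *
 *      rc = cifs_call_async(server, &rqst, NULL, my_callback,
 *                           NULL, NULL, CIFS_ECHO_OP, NULL);
 *
 * my_callback() is a made-up name used only for this sketch.
 */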

/*
 * Send an SMB request.  No response info (other than the return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer, how long to wait, and whether
 * to log the NT STATUS code (error) before mapping it to a POSIX error.
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
                 char *in_buf, int flags)
{
        int rc;
        struct kvec iov[1];
        struct kvec rsp_iov;
        int resp_buf_type;

        iov[0].iov_base = in_buf;
        iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
        flags |= CIFS_NO_RSP_BUF;
        rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
        cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

        return rc;
}

static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
        int rc = 0;

        cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
                 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

        spin_lock(&GlobalMid_Lock);
        switch (mid->mid_state) {
        case MID_RESPONSE_RECEIVED:
                spin_unlock(&GlobalMid_Lock);
                return rc;
        case MID_RETRY_NEEDED:
                rc = -EAGAIN;
                break;
        case MID_RESPONSE_MALFORMED:
                rc = -EIO;
                break;
        case MID_SHUTDOWN:
                rc = -EHOSTDOWN;
                break;
        default:
                if (!(mid->mid_flags & MID_DELETED)) {
                        list_del_init(&mid->qhead);
                        mid->mid_flags |= MID_DELETED;
                }
                cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
                         __func__, mid->mid, mid->mid_state);
                rc = -EIO;
        }
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
        return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
            struct mid_q_entry *mid)
{
        return server->ops->send_cancel ?
                                server->ops->send_cancel(server, rqst, mid) : 0;
}

int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
                   bool log_error)
{
        unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

        dump_smb(mid->resp_buf, min_t(u32, 92, len));

        /* convert the length into a more usable form */
        if (server->sign) {
                struct kvec iov[2];
                int rc = 0;
                struct smb_rqst rqst = { .rq_iov = iov,
                                         .rq_nvec = 2 };

                iov[0].iov_base = mid->resp_buf;
                iov[0].iov_len = 4;
                iov[1].iov_base = (char *)mid->resp_buf + 4;
                iov[1].iov_len = len - 4;
                /* FIXME: add code to kill session */
                rc = cifs_verify_signature(&rqst, server,
                                           mid->sequence_number);
                if (rc)
                        cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
                                 rc);
        }

        /* BB special case reconnect tid and uid here? */
        return map_and_check_smb_error(mid, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
                   struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        rc = allocate_mid(ses, hdr, &mid);
        if (rc)
                return ERR_PTR(rc);
        rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
        if (rc) {
                cifs_delete_mid(mid);
                return ERR_PTR(rc);
        }
        return mid;
}

static void
cifs_compound_callback(struct mid_q_entry *mid)
{
        struct TCP_Server_Info *server = mid->server;
        struct cifs_credits credits;

        credits.value = server->ops->get_credits(mid);
        credits.instance = server->reconnect_instance;

        add_credits(server, &credits, mid->optype);
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        DeleteMidQEntry(mid);
}

/*
 * Return a channel (master if none) of @ses that can be used to send
 * regular requests.
 *
 * If we are currently binding a new channel (negprot/sess.setup),
 * return the new incomplete channel.
 */
struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
{
        uint index = 0;

        if (!ses)
                return NULL;

        if (!ses->binding) {
                /* round robin */
                if (ses->chan_count > 1) {
                        index = (uint)atomic_inc_return(&ses->chan_seq);
                        index %= ses->chan_count;
                }
                return ses->chans[index].server;
        } else {
                return cifs_ses_server(ses);
        }
}
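
/*
 * Illustrative example of the round robin above: with chan_count == 3 and
 * chan_seq advancing 1, 2, 3, 4, ... (atomic_inc_return() yields the
 * post-increment value), successive calls return chans[1], chans[2],
 * chans[0], chans[1], ...
 */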

int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
                   struct TCP_Server_Info *server,
                   const int flags, const int num_rqst, struct smb_rqst *rqst,
                   int *resp_buf_type, struct kvec *resp_iov)
{
        int i, j, optype, rc = 0;
        struct mid_q_entry *midQ[MAX_COMPOUND];
        bool cancelled_mid[MAX_COMPOUND] = {false};
        struct cifs_credits credits[MAX_COMPOUND] = {
                { .value = 0, .instance = 0 }
        };
        unsigned int instance;
        char *buf;

        optype = flags & CIFS_OP_MASK;

        for (i = 0; i < num_rqst; i++)
                resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

        if (!ses || !ses->server || !server) {
                cifs_dbg(VFS, "Null session\n");
                return -EIO;
        }

        if (server->tcpStatus == CifsExiting)
                return -ENOENT;

        /*
         * Wait for all the requests to become available.
         * This approach still leaves the possibility to be stuck waiting for
         * credits if the server doesn't grant credits to the outstanding
         * requests and if the client is completely idle, not generating any
         * other requests.
         * This can be handled by the eventual session reconnect.
         */
        rc = wait_for_compound_request(server, num_rqst, flags,
                                       &instance);
        if (rc)
                return rc;

        for (i = 0; i < num_rqst; i++) {
                credits[i].value = 1;
                credits[i].instance = instance;
        }

        /*
         * Make sure that we sign in the same order that we send on this socket
         * and avoid races inside tcp sendmsg code that could cause corruption
         * of smb data.
         */

        mutex_lock(&server->srv_mutex);

        /*
         * All the parts of the compound chain must use credits obtained from
         * the same session. We cannot use credits obtained from a previous
         * session to send this request. Check if there were reconnects after
         * we obtained credits and return -EAGAIN in such cases to let callers
         * handle it.
         */
        if (instance != server->reconnect_instance) {
                mutex_unlock(&server->srv_mutex);
                for (j = 0; j < num_rqst; j++)
                        add_credits(server, &credits[j], optype);
                return -EAGAIN;
        }

        for (i = 0; i < num_rqst; i++) {
                midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
                if (IS_ERR(midQ[i])) {
                        revert_current_mid(server, i);
                        for (j = 0; j < i; j++)
                                cifs_delete_mid(midQ[j]);
                        mutex_unlock(&server->srv_mutex);

                        /* Update # of requests on wire to server */
                        for (j = 0; j < num_rqst; j++)
                                add_credits(server, &credits[j], optype);
                        return PTR_ERR(midQ[i]);
                }

                midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
                midQ[i]->optype = optype;
                /*
                 * Invoke callback for every part of the compound chain
                 * to calculate credits properly. Wake up this thread only when
                 * the last element is received.
                 */
                if (i < num_rqst - 1)
                        midQ[i]->callback = cifs_compound_callback;
                else
                        midQ[i]->callback = cifs_compound_last_callback;
        }
        cifs_in_send_inc(server);
        rc = smb_send_rqst(server, num_rqst, rqst, flags);
        cifs_in_send_dec(server);

        for (i = 0; i < num_rqst; i++)
                cifs_save_when_sent(midQ[i]);

        if (rc < 0) {
                revert_current_mid(server, num_rqst);
                server->sequence_number -= 2;
        }

        mutex_unlock(&server->srv_mutex);

        /*
         * If sending failed for some reason, or this is an oplock break that
         * we will not receive a response to, return the credits.
         */
        if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
                for (i = 0; i < num_rqst; i++)
                        add_credits(server, &credits[i], optype);
                goto out;
        }

        /*
         * At this point the request is passed to the network stack - we assume
         * that any credits taken from the server structure on the client have
         * been spent and we can't return them back. Once we receive responses
         * we will collect credits granted by the server in the mid callbacks
         * and add those credits to the server structure.
         */

        /*
         * Compounding is never used during session establish.
         */
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
                mutex_lock(&server->srv_mutex);
                smb311_update_preauth_hash(ses, rqst[0].rq_iov,
                                           rqst[0].rq_nvec);
                mutex_unlock(&server->srv_mutex);
        }

        for (i = 0; i < num_rqst; i++) {
                rc = wait_for_response(server, midQ[i]);
                if (rc != 0)
                        break;
        }
        if (rc != 0) {
                for (; i < num_rqst; i++) {
                        cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
                                 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
                        send_cancel(server, &rqst[i], midQ[i]);
                        spin_lock(&GlobalMid_Lock);
                        midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
                        if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
                                midQ[i]->callback = cifs_cancelled_callback;
                                cancelled_mid[i] = true;
                                credits[i].value = 0;
                        }
                        spin_unlock(&GlobalMid_Lock);
                }
        }

        for (i = 0; i < num_rqst; i++) {
                if (rc < 0)
                        goto out;

                rc = cifs_sync_mid_result(midQ[i], server);
                if (rc != 0) {
                        /* mark this mid as cancelled so it is not freed below */
                        cancelled_mid[i] = true;
                        goto out;
                }

                if (!midQ[i]->resp_buf ||
                    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
                        rc = -EIO;
                        cifs_dbg(FYI, "Bad MID state?\n");
                        goto out;
                }

                buf = (char *)midQ[i]->resp_buf;
                resp_iov[i].iov_base = buf;
                resp_iov[i].iov_len = midQ[i]->resp_buf_size +
                        server->vals->header_preamble_size;

                if (midQ[i]->large_buf)
                        resp_buf_type[i] = CIFS_LARGE_BUFFER;
                else
                        resp_buf_type[i] = CIFS_SMALL_BUFFER;

                rc = server->ops->check_receive(midQ[i], server,
                                                     flags & CIFS_LOG_ERROR);

                /* mark it so buf will not be freed by cifs_delete_mid */
                if ((flags & CIFS_NO_RSP_BUF) == 0)
                        midQ[i]->resp_buf = NULL;

        }

        /*
         * Compounding is never used during session establish.
         */
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
                struct kvec iov = {
                        .iov_base = resp_iov[0].iov_base,
                        .iov_len = resp_iov[0].iov_len
                };
                mutex_lock(&server->srv_mutex);
                smb311_update_preauth_hash(ses, &iov, 1);
                mutex_unlock(&server->srv_mutex);
        }

out:
        /*
         * This will dequeue all mids. After this it is important that the
         * demultiplex_thread will not process any of these mids any further.
         * This is prevented above by using a noop callback that will not
         * wake this thread except for the very last PDU.
         */
        for (i = 0; i < num_rqst; i++) {
                if (!cancelled_mid[i])
                        cifs_delete_mid(midQ[i]);
        }

        return rc;
}
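
/*
 * Compound usage sketch (hypothetical, simplified): SMB3 callers such as
 * smb2_compound_op() build several struct smb_rqst entries (e.g.
 * create + query-info + close) and submit them in one round trip:
 *
 *      struct smb_rqst rqst[3];        // filled in by the caller
 *      struct kvec rsp_iov[3];
 *      int buf_type[3];
 *
 *      rc = compound_send_recv(xid, ses, server, 0, 3, rqst,
 *                              buf_type, rsp_iov);
 *
 * The snippet is only a shape sketch; see smb2inode.c for a real caller.
 */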

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
               struct TCP_Server_Info *server,
               struct smb_rqst *rqst, int *resp_buf_type, const int flags,
               struct kvec *resp_iov)
{
        return compound_send_recv(xid, ses, server, flags, 1,
                                  rqst, resp_buf_type, resp_iov);
}

int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
             struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
             const int flags, struct kvec *resp_iov)
{
        struct smb_rqst rqst;
        struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
        int rc;

        if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
                new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
                                        GFP_KERNEL);
                if (!new_iov) {
                        /* otherwise cifs_send_recv below sets resp_buf_type */
                        *resp_buf_type = CIFS_NO_BUFFER;
                        return -ENOMEM;
                }
        } else
                new_iov = s_iov;

        /* 1st iov is an RFC1001 length followed by the rest of the packet */
        memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

        new_iov[0].iov_base = new_iov[1].iov_base;
        new_iov[0].iov_len = 4;
        new_iov[1].iov_base += 4;
        new_iov[1].iov_len -= 4;

        memset(&rqst, 0, sizeof(struct smb_rqst));
        rqst.rq_iov = new_iov;
        rqst.rq_nvec = n_vec + 1;

        rc = cifs_send_recv(xid, ses, ses->server,
                            &rqst, resp_buf_type, flags, resp_iov);
        if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
                kfree(new_iov);
        return rc;
}
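
/*
 * Illustrative effect of the iov split above: given one caller kvec of
 * length 100 whose buffer starts with the 4-byte RFC1001 length field,
 *
 *      new_iov[0] = { buf,     4  }    // RFC1001 length
 *      new_iov[1] = { buf + 4, 96 }    // SMB payload
 *
 * which matches the two-iov layout that cifs_setup_request() and
 * cifs_setup_async_request() verify before building a mid.
 */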

int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
            struct smb_hdr *in_buf, struct smb_hdr *out_buf,
            int *pbytes_returned, const int flags)
{
        int rc = 0;
        struct mid_q_entry *midQ;
        unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
        struct kvec iov = { .iov_base = in_buf, .iov_len = len };
        struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
        struct cifs_credits credits = { .value = 1, .instance = 0 };
        struct TCP_Server_Info *server;

        if (ses == NULL) {
                cifs_dbg(VFS, "Null smb session\n");
                return -EIO;
        }
        server = ses->server;
        if (server == NULL) {
                cifs_dbg(VFS, "Null tcp session\n");
                return -EIO;
        }

        if (server->tcpStatus == CifsExiting)
                return -ENOENT;

        /* Ensure that we do not send more than 50 overlapping requests
           to the same server. We may make this configurable later or
           use ses->maxReq */

        if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
                cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
                                len);
                return -EIO;
        }

        rc = wait_for_free_request(server, flags, &credits.instance);
        if (rc)
                return rc;

        /* make sure that we sign in the same order that we send on this socket
           and avoid races inside tcp sendmsg code that could cause corruption
           of smb data */

        mutex_lock(&server->srv_mutex);

        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
                mutex_unlock(&server->srv_mutex);
                /* Update # of requests on wire to server */
                add_credits(server, &credits, 0);
                return rc;
        }

        rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
        if (rc) {
                mutex_unlock(&server->srv_mutex);
                goto out;
        }

        midQ->mid_state = MID_REQUEST_SUBMITTED;

        cifs_in_send_inc(server);
        rc = smb_send(server, in_buf, len);
        cifs_in_send_dec(server);
        cifs_save_when_sent(midQ);

        if (rc < 0)
                server->sequence_number -= 2;

        mutex_unlock(&server->srv_mutex);

        if (rc < 0)
                goto out;

        rc = wait_for_response(server, midQ);
        if (rc != 0) {
                send_cancel(server, &rqst, midQ);
                spin_lock(&GlobalMid_Lock);
                if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
                        /* no longer considered to be "in-flight" */
                        midQ->callback = DeleteMidQEntry;
                        spin_unlock(&GlobalMid_Lock);
                        add_credits(server, &credits, 0);
                        return rc;
                }
                spin_unlock(&GlobalMid_Lock);
        }

        rc = cifs_sync_mid_result(midQ, server);
        if (rc != 0) {
                add_credits(server, &credits, 0);
                return rc;
        }

        if (!midQ->resp_buf || !out_buf ||
            midQ->mid_state != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cifs_server_dbg(VFS, "Bad MID state?\n");
                goto out;
        }

        *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
        memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
        rc = cifs_check_receive(midQ, server, 0);
out:
        cifs_delete_mid(midQ);
        add_credits(server, &credits, 0);

        return rc;
}
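
/*
 * Call-flow sketch of the synchronous path above (illustration only):
 *
 *      wait_for_free_request()   take one credit
 *      allocate_mid()            queue the mid on pending_mid_q
 *      smb_send()                transmit under srv_mutex
 *      wait_for_response()       sleep until the demultiplex thread wakes us
 *      cifs_sync_mid_result()    map mid_state to an errno
 *      cifs_check_receive()      verify the signature, map the SMB status
 */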

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
                        struct smb_hdr *in_buf,
                        struct smb_hdr *out_buf)
{
        int bytes_returned;
        struct cifs_ses *ses = tcon->ses;
        LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

        /* We just modify the current in_buf to change
           the type of lock from LOCKING_ANDX_SHARED_LOCK
           or LOCKING_ANDX_EXCLUSIVE_LOCK to
           LOCKING_ANDX_CANCEL_LOCK. */

        pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
        pSMB->Timeout = 0;
        pSMB->hdr.Mid = get_next_mid(ses->server);

        return SendReceive(xid, ses, in_buf, out_buf,
                        &bytes_returned, 0);
}
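
/*
 * Illustrative detail: the cancel above reuses the original request
 * buffer, rewriting LockType to LOCKING_ANDX_CANCEL_LOCK |
 * LOCKING_ANDX_LARGE_FILES (flag values are defined in cifspdu.h),
 * zeroing the timeout, and assigning a fresh mid so the cancel travels
 * as its own request rather than a retransmission.
 */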

int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
            struct smb_hdr *in_buf, struct smb_hdr *out_buf,
            int *pbytes_returned)
{
        int rc = 0;
        int rstart = 0;
        struct mid_q_entry *midQ;
        struct cifs_ses *ses;
        unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
        struct kvec iov = { .iov_base = in_buf, .iov_len = len };
        struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
        unsigned int instance;
        struct TCP_Server_Info *server;

        if (tcon == NULL || tcon->ses == NULL) {
                cifs_dbg(VFS, "Null smb session\n");
                return -EIO;
        }
        ses = tcon->ses;
        server = ses->server;

        if (server == NULL) {
                cifs_dbg(VFS, "Null tcp session\n");
                return -EIO;
        }

        if (server->tcpStatus == CifsExiting)
                return -ENOENT;

        /* Ensure that we do not send more than 50 overlapping requests
           to the same server. We may make this configurable later or
           use ses->maxReq */

        if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
                cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
                              len);
                return -EIO;
        }

        rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
        if (rc)
                return rc;

        /* make sure that we sign in the same order that we send on this socket
           and avoid races inside tcp sendmsg code that could cause corruption
           of smb data */

        mutex_lock(&server->srv_mutex);

        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
                mutex_unlock(&server->srv_mutex);
                return rc;
        }

        rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
        if (rc) {
                cifs_delete_mid(midQ);
                mutex_unlock(&server->srv_mutex);
                return rc;
        }

        midQ->mid_state = MID_REQUEST_SUBMITTED;
        cifs_in_send_inc(server);
        rc = smb_send(server, in_buf, len);
        cifs_in_send_dec(server);
        cifs_save_when_sent(midQ);

        if (rc < 0)
                server->sequence_number -= 2;

        mutex_unlock(&server->srv_mutex);

        if (rc < 0) {
                cifs_delete_mid(midQ);
                return rc;
        }

        /* Wait for a reply - allow signals to interrupt. */
        rc = wait_event_interruptible(server->response_q,
                (!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
                ((server->tcpStatus != CifsGood) &&
                 (server->tcpStatus != CifsNew)));

        /* Were we interrupted by a signal? */
        if ((rc == -ERESTARTSYS) &&
                (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
                ((server->tcpStatus == CifsGood) ||
                 (server->tcpStatus == CifsNew))) {

                if (in_buf->Command == SMB_COM_TRANSACTION2) {
                        /* POSIX lock. We send a NT_CANCEL SMB to cause the
                           blocking lock to return. */
                        rc = send_cancel(server, &rqst, midQ);
                        if (rc) {
                                cifs_delete_mid(midQ);
                                return rc;
                        }
                } else {
                        /* Windows lock. We send a LOCKINGX_CANCEL_LOCK
                           to cause the blocking lock to return. */

                        rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

                        /* If we get -ENOLCK back the lock may have
                           already been removed. Don't exit in this case. */
                        if (rc && rc != -ENOLCK) {
                                cifs_delete_mid(midQ);
                                return rc;
                        }
                }

                rc = wait_for_response(server, midQ);
                if (rc) {
                        send_cancel(server, &rqst, midQ);
                        spin_lock(&GlobalMid_Lock);
                        if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
                                /* no longer considered to be "in-flight" */
                                midQ->callback = DeleteMidQEntry;
                                spin_unlock(&GlobalMid_Lock);
                                return rc;
                        }
                        spin_unlock(&GlobalMid_Lock);
                }

                /* We got the response - restart system call. */
                rstart = 1;
        }

        rc = cifs_sync_mid_result(midQ, server);
        if (rc != 0)
                return rc;

        /* rcvd frame is ok */
        if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cifs_tcon_dbg(VFS, "Bad MID state?\n");
                goto out;
        }

        *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
        memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
        rc = cifs_check_receive(midQ, server, 0);
out:
        cifs_delete_mid(midQ);
        if (rstart && rc == -EACCES)
                return -ERESTARTSYS;
        return rc;
}