smb/client/transport.c
// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include <linux/task_io_accounting_ops.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

void
cifs_wake_up_task(struct mid_q_entry *mid)
{
        if (mid->mid_state == MID_RESPONSE_RECEIVED)
                mid->mid_state = MID_RESPONSE_READY;
        wake_up_process(mid->callback_data);
}
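
/*
 * A minimal sketch (not part of the driver) of how the default callback
 * above pairs with a synchronous sender: alloc_mid() below stores the
 * sending task in callback_data, the sender sleeps until the mid leaves
 * MID_REQUEST_SUBMITTED (see wait_for_response() further down), and the
 * demultiplex thread runs cifs_wake_up_task() once the response is in.
 * sync_send_example() is a hypothetical name used only for illustration.
 */
#if 0 /* illustrative sketch, not compiled */
static int sync_send_example(struct TCP_Server_Info *server,
                             struct mid_q_entry *mid)
{
        mid->mid_state = MID_REQUEST_SUBMITTED;
        /* ... smb_send_rqst() ... the demultiplex thread will later call
         * mid->callback(mid), i.e. cifs_wake_up_task(mid), on completion */
        return wait_event_state(server->response_q,
                                mid->mid_state != MID_REQUEST_SUBMITTED &&
                                mid->mid_state != MID_RESPONSE_RECEIVED,
                                TASK_KILLABLE);
}
#endif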

static struct mid_q_entry *
alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
        struct mid_q_entry *temp;

        if (server == NULL) {
                cifs_dbg(VFS, "%s: null TCP session\n", __func__);
                return NULL;
        }

        temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
        memset(temp, 0, sizeof(struct mid_q_entry));
        kref_init(&temp->refcount);
        temp->mid = get_mid(smb_buffer);
        temp->pid = current->pid;
        temp->command = cpu_to_le16(smb_buffer->Command);
        cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
        /*      do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
        /* when mid allocated can be before when sent */
        temp->when_alloc = jiffies;
        temp->server = server;

        /*
         * The default is for the mid to be synchronous, so the
         * default callback just wakes up the current task.
         */
        get_task_struct(current);
        temp->creator = current;
        temp->callback = cifs_wake_up_task;
        temp->callback_data = current;

        atomic_inc(&mid_count);
        temp->mid_state = MID_REQUEST_ALLOCATED;
        return temp;
}
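
/*
 * Reference-count lifecycle, as used throughout this file: alloc_mid()
 * returns a mid holding one reference (kref_init above); additional users
 * take kref_get() and drop with release_mid(), which invokes
 * __release_mid() below once the count reaches zero. A hedged sketch with
 * a hypothetical helper name:
 */
#if 0 /* illustrative sketch, not compiled */
static void mid_refcount_example(struct mid_q_entry *mid)
{
        kref_get(&mid->refcount);       /* second user of the mid */
        /* ... use mid->resp_buf etc. ... */
        release_mid(mid);               /* kref_put -> __release_mid at 0 */
}
#endif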

void __release_mid(struct kref *refcount)
{
        struct mid_q_entry *midEntry =
                        container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
        __le16 command = midEntry->server->vals->lock_cmd;
        __u16 smb_cmd = le16_to_cpu(midEntry->command);
        unsigned long now;
        unsigned long roundtrip_time;
#endif
        struct TCP_Server_Info *server = midEntry->server;

        if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
            (midEntry->mid_state == MID_RESPONSE_RECEIVED ||
             midEntry->mid_state == MID_RESPONSE_READY) &&
            server->ops->handle_cancelled_mid)
                server->ops->handle_cancelled_mid(midEntry, server);

        midEntry->mid_state = MID_FREE;
        atomic_dec(&mid_count);
        if (midEntry->large_buf)
                cifs_buf_release(midEntry->resp_buf);
        else
                cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
        now = jiffies;
        if (now < midEntry->when_alloc)
                cifs_server_dbg(VFS, "Invalid mid allocation time\n");
        roundtrip_time = now - midEntry->when_alloc;

        if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
                if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
                        server->slowest_cmd[smb_cmd] = roundtrip_time;
                        server->fastest_cmd[smb_cmd] = roundtrip_time;
                } else {
                        if (server->slowest_cmd[smb_cmd] < roundtrip_time)
                                server->slowest_cmd[smb_cmd] = roundtrip_time;
                        else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
                                server->fastest_cmd[smb_cmd] = roundtrip_time;
                }
                cifs_stats_inc(&server->num_cmds[smb_cmd]);
                server->time_per_cmd[smb_cmd] += roundtrip_time;
        }
        /*
         * Commands taking longer than one second (default) can be indications
         * that something is wrong, unless it is quite a slow link or a very
         * busy server. Note that this calc is unlikely or impossible to wrap
         * as long as slow_rsp_threshold is not set way above the recommended
         * max value (32767, i.e. 9 hours), and is generally harmless even if
         * wrong since it only affects debug counters - so leave the calc as a
         * simple comparison rather than doing multiple conversions and
         * overflow checks.
         */
        if ((slow_rsp_threshold != 0) &&
            time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
            (midEntry->command != command)) {
                /*
                 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
                 * NB: le16_to_cpu returns unsigned so cannot be negative below
                 */
                if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
                        cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

                trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
                               midEntry->when_sent, midEntry->when_received);
                if (cifsFYI & CIFS_TIMER) {
                        pr_debug("slow rsp: cmd %d mid %llu",
                                 midEntry->command, midEntry->mid);
                        cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
                                  now - midEntry->when_alloc,
                                  now - midEntry->when_sent,
                                  now - midEntry->when_received);
                }
        }
#endif
        put_task_struct(midEntry->creator);

        mempool_free(midEntry, cifs_mid_poolp);
}

void
delete_mid(struct mid_q_entry *mid)
{
        spin_lock(&mid->server->mid_lock);
        if (!(mid->mid_flags & MID_DELETED)) {
                list_del_init(&mid->qhead);
                mid->mid_flags |= MID_DELETED;
        }
        spin_unlock(&mid->server->mid_lock);

        release_mid(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:     Server to send the data to
 * @smb_msg:    Message to send
 * @sent:       amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
              size_t *sent)
{
        int rc = 0;
        int retries = 0;
        struct socket *ssocket = server->ssocket;

        *sent = 0;

        if (server->noblocksnd)
                smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
        else
                smb_msg->msg_flags = MSG_NOSIGNAL;

        while (msg_data_left(smb_msg)) {
                /*
                 * If blocking send, we try 3 times, since each can block
                 * for 5 seconds. For nonblocking we have to try more times,
                 * but wait increasing amounts of time to allow the
                 * socket to clear. The overall time we wait in either
                 * case to send on the socket is about 15 seconds.
                 * Similarly we wait for 15 seconds for a response from
                 * the server in SendReceive[2] for the server to send
                 * a response back for most types of requests (except
                 * SMB Write past end of file which can be slow, and
                 * blocking lock operations). NFS waits slightly longer
                 * than CIFS, but this can make it take longer for
                 * nonresponsive servers to be detected and 15 seconds
                 * is more than enough time for modern networks to
                 * send a packet. In most cases if we fail to send
                 * after the retries we will kill the socket and
                 * reconnect which may clear the network problem.
                 */
                rc = sock_sendmsg(ssocket, smb_msg);
                if (rc == -EAGAIN) {
                        retries++;
                        if (retries >= 14 ||
                            (!server->noblocksnd && (retries > 2))) {
                                cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
                                         ssocket);
                                return -EAGAIN;
                        }
                        msleep(1 << retries);
                        continue;
                }

                if (rc < 0)
                        return rc;

                if (rc == 0) {
                        /* should never happen, letting socket clear before
                           retrying is our only obvious option here */
                        cifs_server_dbg(VFS, "tcp sent no data\n");
                        msleep(500);
                        continue;
                }

                /* send was at least partially successful */
                *sent += rc;
                retries = 0; /* in case we get ENOSPC on the next send */
        }
        return 0;
}
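
/*
 * Worked example of the backoff above (a sketch, not part of the driver):
 * on a nonblocking socket the nth -EAGAIN sleeps 2^n ms, so the retries
 * add up to roughly 2 + 4 + ... + 8192 ms ~= 16 seconds before the
 * "stuck for 15 seconds" cutoff at 14 retries fires.
 */
#if 0 /* illustrative sketch, not compiled */
static unsigned int total_backoff_ms(void)
{
        unsigned int retries, total = 0;

        for (retries = 1; retries < 14; retries++)
                total += 1U << retries;        /* msleep(1 << retries) */
        return total;                          /* 16382 ms */
}
#endif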

unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        unsigned int i;
        struct kvec *iov;
        int nvec;
        unsigned long buflen = 0;

        if (!is_smb1(server) && rqst->rq_nvec >= 2 &&
            rqst->rq_iov[0].iov_len == 4) {
                iov = &rqst->rq_iov[1];
                nvec = rqst->rq_nvec - 1;
        } else {
                iov = rqst->rq_iov;
                nvec = rqst->rq_nvec;
        }

        /* total up iov array first */
        for (i = 0; i < nvec; i++)
                buflen += iov[i].iov_len;

        /*
         * Add in the page array if there is one. The caller needs to make
         * sure rq_offset and rq_tailsz are set correctly. If a buffer of
         * multiple pages ends at page boundary, rq_tailsz needs to be set to
         * PAGE_SIZE.
         */
        if (rqst->rq_npages) {
                if (rqst->rq_npages == 1)
                        buflen += rqst->rq_tailsz;
                else {
                        /*
                         * If there is more than one page, calculate the
                         * buffer length based on rq_offset and rq_tailsz
                         */
                        buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
                                        rqst->rq_offset;
                        buflen += rqst->rq_tailsz;
                }
        }

        return buflen;
}
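
/*
 * Worked example for the page-array math above (numbers are hypothetical):
 * with rq_npages = 3, rq_pagesz = 4096, rq_offset = 100 and rq_tailsz = 500,
 * the payload spans page 0 from offset 100 (3996 bytes), all of page 1
 * (4096 bytes) and 500 bytes of page 2: 4096 * (3 - 1) - 100 + 500 = 8592.
 */
#if 0 /* illustrative sketch, not compiled */
static unsigned long page_array_len_example(void)
{
        unsigned long pagesz = 4096, npages = 3, offset = 100, tailsz = 500;

        return pagesz * (npages - 1) - offset + tailsz;        /* 8592 */
}
#endif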

static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
                struct smb_rqst *rqst)
{
        int rc;
        struct kvec *iov;
        int n_vec;
        unsigned int send_length = 0;
        unsigned int i, j;
        sigset_t mask, oldmask;
        size_t total_len = 0, sent, size;
        struct socket *ssocket = server->ssocket;
        struct msghdr smb_msg = {};
        __be32 rfc1002_marker;

        cifs_in_send_inc(server);
        if (cifs_rdma_enabled(server)) {
                /* return -EAGAIN when connecting or reconnecting */
                rc = -EAGAIN;
                if (server->smbd_conn)
                        rc = smbd_send(server, num_rqst, rqst);
                goto smbd_done;
        }

        rc = -EAGAIN;
        if (ssocket == NULL)
                goto out;

        rc = -ERESTARTSYS;
        if (fatal_signal_pending(current)) {
                cifs_dbg(FYI, "signal pending before send request\n");
                goto out;
        }

        rc = 0;
        /* cork the socket */
        tcp_sock_set_cork(ssocket->sk, true);

        for (j = 0; j < num_rqst; j++)
                send_length += smb_rqst_len(server, &rqst[j]);
        rfc1002_marker = cpu_to_be32(send_length);

        /*
         * We should not allow signals to interrupt the network send because
         * any partial send will cause session reconnects, thus increasing
         * latency of system calls and overloading the server with unnecessary
         * requests.
         */

        sigfillset(&mask);
        sigprocmask(SIG_BLOCK, &mask, &oldmask);

        /* Generate an RFC1002 marker for SMB2+ */
        if (!is_smb1(server)) {
                struct kvec hiov = {
                        .iov_base = &rfc1002_marker,
                        .iov_len  = 4
                };
                iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, &hiov, 1, 4);
                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;
                send_length += 4;
        }

        cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

        for (j = 0; j < num_rqst; j++) {
                iov = rqst[j].rq_iov;
                n_vec = rqst[j].rq_nvec;

                size = 0;
                for (i = 0; i < n_vec; i++) {
                        dump_smb(iov[i].iov_base, iov[i].iov_len);
                        size += iov[i].iov_len;
                }

                iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, iov, n_vec, size);

                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;

                /* now walk the page array and send each page in it */
                for (i = 0; i < rqst[j].rq_npages; i++) {
                        struct bio_vec bvec;

                        bvec.bv_page = rqst[j].rq_pages[i];
                        rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
                                             &bvec.bv_offset);

                        iov_iter_bvec(&smb_msg.msg_iter, ITER_SOURCE,
                                      &bvec, 1, bvec.bv_len);
                        rc = smb_send_kvec(server, &smb_msg, &sent);
                        if (rc < 0)
                                break;

                        total_len += sent;
                }
        }

unmask:
        sigprocmask(SIG_SETMASK, &oldmask, NULL);

        /*
         * If a signal is pending but we have already sent the whole packet to
         * the server, we need to return success status to allow a
         * corresponding mid entry to be kept in the pending requests queue,
         * thus allowing the client to handle responses from the server.
         *
         * If only part of the packet has been sent there is no need to hide
         * the interrupt because the session will be reconnected anyway, so
         * there won't be any response from the server to handle.
         */

        if (signal_pending(current) && (total_len != send_length)) {
                cifs_dbg(FYI, "signal is pending after attempt to send\n");
                rc = -ERESTARTSYS;
        }

        /* uncork it */
        tcp_sock_set_cork(ssocket->sk, false);

        if ((total_len > 0) && (total_len != send_length)) {
                cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
                         send_length, total_len);
                /*
                 * If we have only sent part of an SMB then the next SMB could
                 * be taken as the remainder of this one. We need to kill the
                 * socket so the server throws away the partial SMB.
                 */
                cifs_signal_cifsd_for_reconnect(server, false);
                trace_smb3_partial_send_reconnect(server->CurrentMid,
                                                  server->conn_id, server->hostname);
        }
smbd_done:
        /*
         * There's hardly any use for the layers above to know the
         * actual error code here. All they should do at this point is
         * retry the connection and hope it goes away.
         */
        if (rc < 0 && rc != -EINTR && rc != -EAGAIN) {
                cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
                         rc);
                rc = -ECONNABORTED;
                cifs_signal_cifsd_for_reconnect(server, false);
        } else if (rc > 0)
                rc = 0;
out:
        cifs_in_send_dec(server);
        return rc;
}
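
/*
 * Framing sketch (illustrative, not part of the driver): for SMB2+ every
 * send is preceded by a 4-byte, big-endian RFC1002-style length field
 * covering everything that follows, which is why send_length is converted
 * with cpu_to_be32() above and pushed through the socket before the
 * request iovecs themselves.
 */
#if 0 /* illustrative sketch, not compiled */
static void build_rfc1002_marker(unsigned char hdr[4], u32 payload_len)
{
        __be32 marker = cpu_to_be32(payload_len);

        memcpy(hdr, &marker, 4);        /* zero type byte + 3-byte length */
}
#endif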

static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
              struct smb_rqst *rqst, int flags)
{
        struct kvec iov;
        struct smb2_transform_hdr *tr_hdr;
        struct smb_rqst cur_rqst[MAX_COMPOUND];
        int rc;

        if (!(flags & CIFS_TRANSFORM_REQ))
                return __smb_send_rqst(server, num_rqst, rqst);

        if (WARN_ON_ONCE(num_rqst > MAX_COMPOUND - 1))
                return -EIO;

        if (!server->ops->init_transform_rq) {
                cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
                return -EIO;
        }

        tr_hdr = kzalloc(sizeof(*tr_hdr), GFP_NOFS);
        if (!tr_hdr)
                return -ENOMEM;

        memset(&cur_rqst[0], 0, sizeof(cur_rqst));
        memset(&iov, 0, sizeof(iov));

        iov.iov_base = tr_hdr;
        iov.iov_len = sizeof(*tr_hdr);
        cur_rqst[0].rq_iov = &iov;
        cur_rqst[0].rq_nvec = 1;

        rc = server->ops->init_transform_rq(server, num_rqst + 1,
                                            &cur_rqst[0], rqst);
        if (rc)
                goto out;

        rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
        smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
out:
        kfree(tr_hdr);
        return rc;
}
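
/*
 * Layout sketch for the encrypted path above (illustrative only):
 * cur_rqst[0] carries just the transform header, and init_transform_rq()
 * is expected to fill cur_rqst[1..num_rqst] from the plaintext requests,
 * so num_rqst + 1 requests go out on the wire:
 *
 *   cur_rqst[0]    -> { tr_hdr }                      (added here)
 *   cur_rqst[1..n] -> transformed copies of rqst[0..n-1]
 */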

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
         unsigned int smb_buf_length)
{
        struct kvec iov[2];
        struct smb_rqst rqst = { .rq_iov = iov,
                                 .rq_nvec = 2 };

        iov[0].iov_base = smb_buffer;
        iov[0].iov_len = 4;
        iov[1].iov_base = (char *)smb_buffer + 4;
        iov[1].iov_len = smb_buf_length;

        return __smb_send_rqst(server, 1, &rqst);
}

static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
                      const int timeout, const int flags,
                      unsigned int *instance)
{
        long rc;
        int *credits;
        int optype;
        long int t;
        int scredits, in_flight;

        if (timeout < 0)
                t = MAX_JIFFY_OFFSET;
        else
                t = msecs_to_jiffies(timeout);

        optype = flags & CIFS_OP_MASK;

        *instance = 0;

        credits = server->ops->get_credits_field(server, optype);
        /* Since an echo is already inflight, no need to wait to send another */
        if (*credits <= 0 && optype == CIFS_ECHO_OP)
                return -EAGAIN;

        spin_lock(&server->req_lock);
        if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
                /* oplock breaks must not be held up */
                server->in_flight++;
                if (server->in_flight > server->max_in_flight)
                        server->max_in_flight = server->in_flight;
                *credits -= 1;
                *instance = server->reconnect_instance;
                scredits = *credits;
                in_flight = server->in_flight;
                spin_unlock(&server->req_lock);

                trace_smb3_nblk_credits(server->CurrentMid,
                                server->conn_id, server->hostname, scredits, -1, in_flight);
                cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
                                __func__, 1, scredits);

                return 0;
        }

        while (1) {
                spin_unlock(&server->req_lock);

                spin_lock(&server->srv_lock);
                if (server->tcpStatus == CifsExiting) {
                        spin_unlock(&server->srv_lock);
                        return -ENOENT;
                }
                spin_unlock(&server->srv_lock);

                spin_lock(&server->req_lock);
                if (*credits < num_credits) {
                        scredits = *credits;
                        spin_unlock(&server->req_lock);

                        cifs_num_waiters_inc(server);
                        rc = wait_event_killable_timeout(server->request_q,
                                has_credits(server, credits, num_credits), t);
                        cifs_num_waiters_dec(server);
                        if (!rc) {
                                spin_lock(&server->req_lock);
                                scredits = *credits;
                                in_flight = server->in_flight;
                                spin_unlock(&server->req_lock);

                                trace_smb3_credit_timeout(server->CurrentMid,
                                                server->conn_id, server->hostname, scredits,
                                                num_credits, in_flight);
                                cifs_server_dbg(VFS, "wait timed out after %d ms\n",
                                                timeout);
                                return -EBUSY;
                        }
                        if (rc == -ERESTARTSYS)
                                return -ERESTARTSYS;
                        spin_lock(&server->req_lock);
                } else {
                        /*
                         * For normal commands, reserve the last MAX_COMPOUND
                         * credits for compound requests.
                         * Otherwise these compounds could be permanently
                         * starved for credits by single-credit requests.
                         *
                         * To prevent spinning CPU, block this thread until
                         * there are >MAX_COMPOUND credits available.
                         * But only do this if we already have a lot of
                         * credits in flight to avoid triggering this check
                         * for servers that are slow to hand out credits on
                         * new sessions.
                         */
                        if (!optype && num_credits == 1 &&
                            server->in_flight > 2 * MAX_COMPOUND &&
                            *credits <= MAX_COMPOUND) {
                                spin_unlock(&server->req_lock);

                                cifs_num_waiters_inc(server);
                                rc = wait_event_killable_timeout(
                                        server->request_q,
                                        has_credits(server, credits,
                                                    MAX_COMPOUND + 1),
                                        t);
                                cifs_num_waiters_dec(server);
                                if (!rc) {
                                        spin_lock(&server->req_lock);
                                        scredits = *credits;
                                        in_flight = server->in_flight;
                                        spin_unlock(&server->req_lock);

                                        trace_smb3_credit_timeout(
                                                        server->CurrentMid,
                                                        server->conn_id, server->hostname,
                                                        scredits, num_credits, in_flight);
                                        cifs_server_dbg(VFS, "wait timed out after %d ms\n",
                                                        timeout);
                                        return -EBUSY;
                                }
                                if (rc == -ERESTARTSYS)
                                        return -ERESTARTSYS;
                                spin_lock(&server->req_lock);
                                continue;
                        }

                        /*
                         * Cannot count locking commands against the total
                         * as they are allowed to block on the server.
                         */

                        /* update # of requests on the wire to server */
                        if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
                                *credits -= num_credits;
                                server->in_flight += num_credits;
                                if (server->in_flight > server->max_in_flight)
                                        server->max_in_flight = server->in_flight;
                                *instance = server->reconnect_instance;
                        }
                        scredits = *credits;
                        in_flight = server->in_flight;
                        spin_unlock(&server->req_lock);

                        trace_smb3_waitff_credits(server->CurrentMid,
                                        server->conn_id, server->hostname, scredits,
                                        -(num_credits), in_flight);
                        cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
                                        __func__, num_credits, scredits);
                        break;
                }
        }
        return 0;
}
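
/*
 * Credit accounting sketch: a caller that got through the wait above has
 * effectively performed the transaction below; the matching add_credits()
 * happens in the mid callbacks once the server responds. A simplified
 * sketch, not one of the actual helpers:
 */
#if 0 /* illustrative sketch, not compiled */
static void take_credits_example(struct TCP_Server_Info *server,
                                 int *credits, int num_credits)
{
        spin_lock(&server->req_lock);
        *credits -= num_credits;                /* spend before sending */
        server->in_flight += num_credits;       /* track outstanding I/O */
        spin_unlock(&server->req_lock);
        /* ... response arrives: add_credits() returns what the server grants */
}
#endif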

static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
                      unsigned int *instance)
{
        return wait_for_free_credits(server, 1, -1, flags,
                                     instance);
}

static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
                          const int flags, unsigned int *instance)
{
        int *credits;
        int scredits, in_flight;

        credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

        spin_lock(&server->req_lock);
        scredits = *credits;
        in_flight = server->in_flight;

        if (*credits < num) {
                /*
                 * If the server is tight on resources or just gives us fewer
                 * credits for other reasons (e.g. requests are coming out of
                 * order and the server delays granting more credits until it
                 * processes a missing mid) and we exhausted most available
                 * credits, there may be situations when we try to send
                 * a compound request but we don't have enough credits. At this
                 * point the client needs to decide if it should wait for
                 * additional credits or fail the request. If at least one
                 * request is in flight there is a high probability that the
                 * server will return enough credits to satisfy this compound
                 * request.
                 *
                 * Return immediately if no requests are in flight since we
                 * would otherwise be stuck waiting for credits.
                 */
                if (server->in_flight == 0) {
                        spin_unlock(&server->req_lock);
                        trace_smb3_insufficient_credits(server->CurrentMid,
                                        server->conn_id, server->hostname, scredits,
                                        num, in_flight);
                        cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
                                        __func__, in_flight, num, scredits);
                        return -EDEADLK;
                }
        }
        spin_unlock(&server->req_lock);

        return wait_for_free_credits(server, num, 60000, flags,
                                     instance);
}
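
/*
 * Why the in_flight == 0 check above matters (illustrative): with zero
 * requests outstanding the server has no pending completion from which to
 * grant more credits, so waiting could block forever; returning -EDEADLK
 * instead lets the caller back off. A condensed restatement of the
 * decision, with a hypothetical helper name:
 */
#if 0 /* illustrative sketch, not compiled */
static int compound_credit_decision(int avail, int needed, int in_flight)
{
        if (avail >= needed)
                return 0;               /* enough credits, send now */
        if (in_flight == 0)
                return -EDEADLK;        /* no response will ever refill */
        return 1;                       /* wait: in-flight responses refill */
}
#endif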

int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
                      unsigned int *num, struct cifs_credits *credits)
{
        *num = size;
        credits->value = 0;
        credits->instance = server->reconnect_instance;
        return 0;
}

static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
                        struct mid_q_entry **ppmidQ)
{
        spin_lock(&ses->ses_lock);
        if (ses->ses_status == SES_NEW) {
                if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
                        (in_buf->Command != SMB_COM_NEGOTIATE)) {
                        spin_unlock(&ses->ses_lock);
                        return -EAGAIN;
                }
                /* else ok - we are setting up session */
        }

        if (ses->ses_status == SES_EXITING) {
                /* check if SMB session is bad because we are setting it up */
                if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
                        spin_unlock(&ses->ses_lock);
                        return -EAGAIN;
                }
                /* else ok - we are shutting down session */
        }
        spin_unlock(&ses->ses_lock);

        *ppmidQ = alloc_mid(in_buf, ses->server);
        if (*ppmidQ == NULL)
                return -ENOMEM;
        spin_lock(&ses->server->mid_lock);
        list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
        spin_unlock(&ses->server->mid_lock);
        return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
        int error;

        error = wait_event_state(server->response_q,
                                 midQ->mid_state != MID_REQUEST_SUBMITTED &&
                                 midQ->mid_state != MID_RESPONSE_RECEIVED,
                                 (TASK_KILLABLE|TASK_FREEZABLE_UNSAFE));
        if (error < 0)
                return -ERESTARTSYS;

        return 0;
}

struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        /* enable signing if server requires it */
        if (server->sign)
                hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

        mid = alloc_mid(hdr, server);
        if (mid == NULL)
                return ERR_PTR(-ENOMEM);

        rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
        if (rc) {
                release_mid(mid);
                return ERR_PTR(rc);
        }

        return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
                mid_receive_t *receive, mid_callback_t *callback,
                mid_handle_t *handle, void *cbdata, const int flags,
                const struct cifs_credits *exist_credits)
{
        int rc;
        struct mid_q_entry *mid;
        struct cifs_credits credits = { .value = 0, .instance = 0 };
        unsigned int instance;
        int optype;

        optype = flags & CIFS_OP_MASK;

        if ((flags & CIFS_HAS_CREDITS) == 0) {
                rc = wait_for_free_request(server, flags, &instance);
                if (rc)
                        return rc;
                credits.value = 1;
                credits.instance = instance;
        } else
                instance = exist_credits->instance;

        cifs_server_lock(server);

        /*
         * We can't use credits obtained from the previous session to send this
         * request. Check if there were reconnects after we obtained credits and
         * return -EAGAIN in such cases to let callers handle it.
         */
        if (instance != server->reconnect_instance) {
                cifs_server_unlock(server);
                add_credits_and_wake_if(server, &credits, optype);
                return -EAGAIN;
        }

        mid = server->ops->setup_async_request(server, rqst);
        if (IS_ERR(mid)) {
                cifs_server_unlock(server);
                add_credits_and_wake_if(server, &credits, optype);
                return PTR_ERR(mid);
        }

        mid->receive = receive;
        mid->callback = callback;
        mid->callback_data = cbdata;
        mid->handle = handle;
        mid->mid_state = MID_REQUEST_SUBMITTED;

        /* put it on the pending_mid_q */
        spin_lock(&server->mid_lock);
        list_add_tail(&mid->qhead, &server->pending_mid_q);
        spin_unlock(&server->mid_lock);

        /*
         * Need to store the time in mid before calling I/O. For call_async,
         * the I/O response may come back and free the mid entry on another
         * thread.
         */
        cifs_save_when_sent(mid);
        rc = smb_send_rqst(server, 1, rqst, flags);

        if (rc < 0) {
                revert_current_mid(server, mid->credits);
                server->sequence_number -= 2;
                delete_mid(mid);
        }

        cifs_server_unlock(server);

        if (rc == 0)
                return 0;

        add_credits_and_wake_if(server, &credits, optype);
        return rc;
}

/*
 * Send an SMB request. No response info (other than the return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer, how long to wait, and whether
 * to log the NT STATUS code (error) before mapping it to a POSIX error.
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
                 char *in_buf, int flags)
{
        int rc;
        struct kvec iov[1];
        struct kvec rsp_iov;
        int resp_buf_type;

        iov[0].iov_base = in_buf;
        iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
        flags |= CIFS_NO_RSP_BUF;
        rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
        cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

        return rc;
}

static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
        int rc = 0;

        cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
                 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

        spin_lock(&server->mid_lock);
        switch (mid->mid_state) {
        case MID_RESPONSE_READY:
                spin_unlock(&server->mid_lock);
                return rc;
        case MID_RETRY_NEEDED:
                rc = -EAGAIN;
                break;
        case MID_RESPONSE_MALFORMED:
                rc = -EIO;
                break;
        case MID_SHUTDOWN:
                rc = -EHOSTDOWN;
                break;
        default:
                if (!(mid->mid_flags & MID_DELETED)) {
                        list_del_init(&mid->qhead);
                        mid->mid_flags |= MID_DELETED;
                }
                cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
                         __func__, mid->mid, mid->mid_state);
                rc = -EIO;
        }
        spin_unlock(&server->mid_lock);

        release_mid(mid);
        return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
            struct mid_q_entry *mid)
{
        return server->ops->send_cancel ?
                                server->ops->send_cancel(server, rqst, mid) : 0;
}

int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
                   bool log_error)
{
        unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

        dump_smb(mid->resp_buf, min_t(u32, 92, len));

        /* convert the length into a more usable form */
        if (server->sign) {
                struct kvec iov[2];
                int rc = 0;
                struct smb_rqst rqst = { .rq_iov = iov,
                                         .rq_nvec = 2 };

                iov[0].iov_base = mid->resp_buf;
                iov[0].iov_len = 4;
                iov[1].iov_base = (char *)mid->resp_buf + 4;
                iov[1].iov_len = len - 4;
                /* FIXME: add code to kill session */
                rc = cifs_verify_signature(&rqst, server,
                                           mid->sequence_number);
                if (rc)
                        cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
                                 rc);
        }

        /* BB special case reconnect tid and uid here? */
        return map_and_check_smb_error(mid, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
                   struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        rc = allocate_mid(ses, hdr, &mid);
        if (rc)
                return ERR_PTR(rc);
        rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
        if (rc) {
                delete_mid(mid);
                return ERR_PTR(rc);
        }
        return mid;
}

static void
cifs_compound_callback(struct mid_q_entry *mid)
{
        struct TCP_Server_Info *server = mid->server;
        struct cifs_credits credits;

        credits.value = server->ops->get_credits(mid);
        credits.instance = server->reconnect_instance;

        add_credits(server, &credits, mid->optype);

        if (mid->mid_state == MID_RESPONSE_RECEIVED)
                mid->mid_state = MID_RESPONSE_READY;
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        release_mid(mid);
}

/*
 * Return a channel (master if none) of @ses that can be used to send
 * regular requests.
 *
 * If we are currently binding a new channel (negprot/sess.setup),
 * return the new incomplete channel.
 */
struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
{
        uint index = 0;

        if (!ses)
                return NULL;

        /* round robin */
        index = (uint)atomic_inc_return(&ses->chan_seq);

        spin_lock(&ses->chan_lock);
        index %= ses->chan_count;
        spin_unlock(&ses->chan_lock);

        return ses->chans[index].server;
}
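
/*
 * Channel selection sketch: chan_seq only ever increments, so taking it
 * modulo chan_count under chan_lock yields a simple round-robin over the
 * session's channels (a hypothetical three-channel session cycles
 * 0, 1, 2, 0, ...).
 */
#if 0 /* illustrative sketch, not compiled */
static unsigned int round_robin_example(unsigned int seq,
                                        unsigned int chan_count)
{
        return seq % chan_count;        /* seq = 7, count = 3 -> channel 1 */
}
#endif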

int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
                   struct TCP_Server_Info *server,
                   const int flags, const int num_rqst, struct smb_rqst *rqst,
                   int *resp_buf_type, struct kvec *resp_iov)
{
        int i, j, optype, rc = 0;
        struct mid_q_entry *midQ[MAX_COMPOUND];
        bool cancelled_mid[MAX_COMPOUND] = {false};
        struct cifs_credits credits[MAX_COMPOUND] = {
                { .value = 0, .instance = 0 }
        };
        unsigned int instance;
        char *buf;

        optype = flags & CIFS_OP_MASK;

        for (i = 0; i < num_rqst; i++)
                resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

        if (!ses || !ses->server || !server) {
                cifs_dbg(VFS, "Null session\n");
                return -EIO;
        }

        spin_lock(&server->srv_lock);
        if (server->tcpStatus == CifsExiting) {
                spin_unlock(&server->srv_lock);
                return -ENOENT;
        }
        spin_unlock(&server->srv_lock);

        /*
         * Wait for all the requests to become available.
         * This approach still leaves the possibility to be stuck waiting for
         * credits if the server doesn't grant credits to the outstanding
         * requests and if the client is completely idle, not generating any
         * other requests.
         * This can be handled by the eventual session reconnect.
         */
        rc = wait_for_compound_request(server, num_rqst, flags,
                                       &instance);
        if (rc)
                return rc;

        for (i = 0; i < num_rqst; i++) {
                credits[i].value = 1;
                credits[i].instance = instance;
        }

        /*
         * Make sure that we sign in the same order that we send on this socket
         * and avoid races inside tcp sendmsg code that could cause corruption
         * of smb data.
         */

        cifs_server_lock(server);

        /*
         * All the parts of the compound chain must use credits obtained from
         * the same session. We cannot use credits obtained from a previous
         * session to send this request. Check if there were reconnects after
         * we obtained credits and return -EAGAIN in such cases to let callers
         * handle it.
         */
        if (instance != server->reconnect_instance) {
                cifs_server_unlock(server);
                for (j = 0; j < num_rqst; j++)
                        add_credits(server, &credits[j], optype);
                return -EAGAIN;
        }

        for (i = 0; i < num_rqst; i++) {
                midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
                if (IS_ERR(midQ[i])) {
                        revert_current_mid(server, i);
                        for (j = 0; j < i; j++)
                                delete_mid(midQ[j]);
                        cifs_server_unlock(server);

                        /* Update # of requests on wire to server */
                        for (j = 0; j < num_rqst; j++)
                                add_credits(server, &credits[j], optype);
                        return PTR_ERR(midQ[i]);
                }

                midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
                midQ[i]->optype = optype;
                /*
                 * Invoke callback for every part of the compound chain
                 * to calculate credits properly. Wake up this thread only when
                 * the last element is received.
                 */
                if (i < num_rqst - 1)
                        midQ[i]->callback = cifs_compound_callback;
                else
                        midQ[i]->callback = cifs_compound_last_callback;
        }
        rc = smb_send_rqst(server, num_rqst, rqst, flags);

        for (i = 0; i < num_rqst; i++)
                cifs_save_when_sent(midQ[i]);

        if (rc < 0) {
                revert_current_mid(server, num_rqst);
                server->sequence_number -= 2;
        }

        cifs_server_unlock(server);

        /*
         * If sending failed for some reason or it is an oplock break that we
         * will not receive a response to - return credits back
         */
        if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
                for (i = 0; i < num_rqst; i++)
                        add_credits(server, &credits[i], optype);
                goto out;
        }

        /*
         * At this point the request is passed to the network stack - we assume
         * that any credits taken from the server structure on the client have
         * been spent and we can't return them back. Once we receive responses
         * we will collect credits granted by the server in the mid callbacks
         * and add those credits to the server structure.
         */

        /*
         * Compounding is never used during session establish.
         */
        spin_lock(&ses->ses_lock);
        if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
                spin_unlock(&ses->ses_lock);

                cifs_server_lock(server);
                smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
                cifs_server_unlock(server);

                spin_lock(&ses->ses_lock);
        }
        spin_unlock(&ses->ses_lock);

        for (i = 0; i < num_rqst; i++) {
                rc = wait_for_response(server, midQ[i]);
                if (rc != 0)
                        break;
        }
        if (rc != 0) {
                for (; i < num_rqst; i++) {
                        cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
                                 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
                        send_cancel(server, &rqst[i], midQ[i]);
                        spin_lock(&server->mid_lock);
                        midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
                        if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED ||
                            midQ[i]->mid_state == MID_RESPONSE_RECEIVED) {
                                midQ[i]->callback = cifs_cancelled_callback;
                                cancelled_mid[i] = true;
                                credits[i].value = 0;
                        }
                        spin_unlock(&server->mid_lock);
                }
        }

        for (i = 0; i < num_rqst; i++) {
                if (rc < 0)
                        goto out;

                rc = cifs_sync_mid_result(midQ[i], server);
                if (rc != 0) {
                        /* mark this mid as cancelled to not free it below */
                        cancelled_mid[i] = true;
                        goto out;
                }

                if (!midQ[i]->resp_buf ||
                    midQ[i]->mid_state != MID_RESPONSE_READY) {
                        rc = -EIO;
                        cifs_dbg(FYI, "Bad MID state?\n");
                        goto out;
                }

                buf = (char *)midQ[i]->resp_buf;
                resp_iov[i].iov_base = buf;
                resp_iov[i].iov_len = midQ[i]->resp_buf_size +
                        HEADER_PREAMBLE_SIZE(server);

                if (midQ[i]->large_buf)
                        resp_buf_type[i] = CIFS_LARGE_BUFFER;
                else
                        resp_buf_type[i] = CIFS_SMALL_BUFFER;

                rc = server->ops->check_receive(midQ[i], server,
                                                     flags & CIFS_LOG_ERROR);

                /* mark it so buf will not be freed by delete_mid */
                if ((flags & CIFS_NO_RSP_BUF) == 0)
                        midQ[i]->resp_buf = NULL;

        }

        /*
         * Compounding is never used during session establish.
         */
        spin_lock(&ses->ses_lock);
        if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
                struct kvec iov = {
                        .iov_base = resp_iov[0].iov_base,
                        .iov_len = resp_iov[0].iov_len
                };
                spin_unlock(&ses->ses_lock);
                cifs_server_lock(server);
                smb311_update_preauth_hash(ses, server, &iov, 1);
                cifs_server_unlock(server);
                spin_lock(&ses->ses_lock);
        }
        spin_unlock(&ses->ses_lock);

out:
        /*
         * This will dequeue all mids. After this it is important that the
         * demultiplex_thread will not process any of these mids any further.
         * This is prevented above by using a noop callback that will not
         * wake this thread except for the very last PDU.
         */
        for (i = 0; i < num_rqst; i++) {
                if (!cancelled_mid[i])
                        delete_mid(midQ[i]);
        }

        return rc;
}
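
/*
 * Usage sketch (hypothetical buffers and flags): a caller compounds two
 * prepared requests and receives one response iovec per request. The
 * rqst[] array stands in for requests built by the SMB2 layer, and
 * compound_usage_example() is an invented name for illustration.
 */
#if 0 /* illustrative sketch, not compiled */
static int compound_usage_example(const unsigned int xid,
                                  struct cifs_ses *ses,
                                  struct TCP_Server_Info *server,
                                  struct smb_rqst rqst[2])
{
        int resp_buf_type[2];
        struct kvec resp_iov[2];

        return compound_send_recv(xid, ses, server, 0 /* flags */,
                                  2, rqst, resp_buf_type, resp_iov);
}
#endif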

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
               struct TCP_Server_Info *server,
               struct smb_rqst *rqst, int *resp_buf_type, const int flags,
               struct kvec *resp_iov)
{
        return compound_send_recv(xid, ses, server, flags, 1,
                                  rqst, resp_buf_type, resp_iov);
}

int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
             struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
             const int flags, struct kvec *resp_iov)
{
        struct smb_rqst rqst;
        struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
        int rc;

        if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
                new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
                                        GFP_KERNEL);
                if (!new_iov) {
                        /* otherwise cifs_send_recv below sets resp_buf_type */
                        *resp_buf_type = CIFS_NO_BUFFER;
                        return -ENOMEM;
                }
        } else
                new_iov = s_iov;

        /* 1st iov is a RFC1001 length followed by the rest of the packet */
        memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

        new_iov[0].iov_base = new_iov[1].iov_base;
        new_iov[0].iov_len = 4;
        new_iov[1].iov_base += 4;
        new_iov[1].iov_len -= 4;

        memset(&rqst, 0, sizeof(struct smb_rqst));
        rqst.rq_iov = new_iov;
        rqst.rq_nvec = n_vec + 1;

        rc = cifs_send_recv(xid, ses, ses->server,
                            &rqst, resp_buf_type, flags, resp_iov);
        if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
                kfree(new_iov);
        return rc;
}
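
/*
 * Worked example of the iovec split above (hypothetical 100-byte SMB):
 * the caller's first iov holds the 4-byte RFC1001 length followed by the
 * packet, so new_iov[0] points at those first 4 bytes and new_iov[1] is
 * advanced past them.
 */
#if 0 /* illustrative sketch, not compiled */
static void split_first_iov_example(struct kvec *new_iov, char *buf)
{
        /* buf: [4-byte length][96 bytes of SMB] => 100 bytes total */
        new_iov[0].iov_base = buf;
        new_iov[0].iov_len = 4;
        new_iov[1].iov_base = buf + 4;
        new_iov[1].iov_len = 96;
}
#endif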
1341
1342 int
1343 SendReceive(const unsigned int xid, struct cifs_ses *ses,
1344             struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1345             int *pbytes_returned, const int flags)
1346 {
1347         int rc = 0;
1348         struct mid_q_entry *midQ;
1349         unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1350         struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1351         struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1352         struct cifs_credits credits = { .value = 1, .instance = 0 };
1353         struct TCP_Server_Info *server;
1354
1355         if (ses == NULL) {
1356                 cifs_dbg(VFS, "Null smb session\n");
1357                 return -EIO;
1358         }
1359         server = ses->server;
1360         if (server == NULL) {
1361                 cifs_dbg(VFS, "Null tcp session\n");
1362                 return -EIO;
1363         }
1364
1365         spin_lock(&server->srv_lock);
1366         if (server->tcpStatus == CifsExiting) {
1367                 spin_unlock(&server->srv_lock);
1368                 return -ENOENT;
1369         }
1370         spin_unlock(&server->srv_lock);
1371
1372         /* Ensure that we do not send more than 50 overlapping requests
1373            to the same server. We may make this configurable later or
1374            use ses->maxReq */
1375
1376         if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1377                 cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
1378                                 len);
1379                 return -EIO;
1380         }
1381
1382         rc = wait_for_free_request(server, flags, &credits.instance);
1383         if (rc)
1384                 return rc;
1385
1386         /* make sure that we sign in the same order that we send on this socket
1387            and avoid races inside tcp sendmsg code that could cause corruption
1388            of smb data */
1389
1390         cifs_server_lock(server);
1391
1392         rc = allocate_mid(ses, in_buf, &midQ);
1393         if (rc) {
1394                 cifs_server_unlock(server);
1395                 /* Update # of requests on wire to server */
1396                 add_credits(server, &credits, 0);
1397                 return rc;
1398         }
1399
1400         rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1401         if (rc) {
1402                 cifs_server_unlock(server);
1403                 goto out;
1404         }
1405
1406         midQ->mid_state = MID_REQUEST_SUBMITTED;
1407
1408         rc = smb_send(server, in_buf, len);
1409         cifs_save_when_sent(midQ);
1410
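             /*
              * A signed request consumes two signing sequence numbers, one
              * for the request and one reserved for the response, so roll
              * both back if the send failed.
              */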
1411         if (rc < 0)
1412                 server->sequence_number -= 2;
1413
1414         cifs_server_unlock(server);
1415
1416         if (rc < 0)
1417                 goto out;
1418
1419         rc = wait_for_response(server, midQ);
1420         if (rc != 0) {
1421                 send_cancel(server, &rqst, midQ);
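                     /*
                      * If the mid is still pending, leave it queued and let
                      * the demultiplex thread release it when the response
                      * (or a reconnect) finally arrives; freeing it here
                      * would race with the receive path.
                      */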
1422                 spin_lock(&server->mid_lock);
1423                 if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
1424                     midQ->mid_state == MID_RESPONSE_RECEIVED) {
1425                         /* no longer considered to be "in-flight" */
1426                         midQ->callback = release_mid;
1427                         spin_unlock(&server->mid_lock);
1428                         add_credits(server, &credits, 0);
1429                         return rc;
1430                 }
1431                 spin_unlock(&server->mid_lock);
1432         }
1433
1434         rc = cifs_sync_mid_result(midQ, server);
1435         if (rc != 0) {
1436                 add_credits(server, &credits, 0);
1437                 return rc;
1438         }
1439
1440         if (!midQ->resp_buf || !out_buf ||
1441             midQ->mid_state != MID_RESPONSE_READY) {
1442                 rc = -EIO;
1443                 cifs_server_dbg(VFS, "Bad MID state?\n");
1444                 goto out;
1445         }
1446
1447         *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1448         memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1449         rc = cifs_check_receive(midQ, server, 0);
1450 out:
1451         delete_mid(midQ);
1452         add_credits(server, &credits, 0);
1453
1454         return rc;
1455 }
1456
1457 /*
1458  * We send a LOCKING_ANDX_CANCEL_LOCK to cause the Windows
      * blocking lock to return.
      */
1459
1460 static int
1461 send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
1462                         struct smb_hdr *in_buf,
1463                         struct smb_hdr *out_buf)
1464 {
1465         int bytes_returned;
1466         struct cifs_ses *ses = tcon->ses;
1467         LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
1468
1469         /*
1470          * We just modify the current in_buf to change the type of lock
1471          * from LOCKING_ANDX_SHARED_LOCK or LOCKING_ANDX_EXCLUSIVE_LOCK
1472          * to LOCKING_ANDX_CANCEL_LOCK.
              */
1473
1474         pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
1475         pSMB->Timeout = 0;
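             /* the cancel goes on the wire as a new request, so it needs a fresh mid */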
1476         pSMB->hdr.Mid = get_next_mid(ses->server);
1477
1478         return SendReceive(xid, ses, in_buf, out_buf,
1479                         &bytes_returned, 0);
1480 }
1481
1482 int
1483 SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
1484             struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1485             int *pbytes_returned)
1486 {
1487         int rc = 0;
1488         int rstart = 0;
1489         struct mid_q_entry *midQ;
1490         struct cifs_ses *ses;
1491         unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1492         struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1493         struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1494         unsigned int instance;
1495         struct TCP_Server_Info *server;
1496
1497         if (tcon == NULL || tcon->ses == NULL) {
1498                 cifs_dbg(VFS, "Null smb session\n");
1499                 return -EIO;
1500         }
1501         ses = tcon->ses;
1502         server = ses->server;
1503
1504         if (server == NULL) {
1505                 cifs_dbg(VFS, "Null tcp session\n");
1506                 return -EIO;
1507         }
1508
1509         spin_lock(&server->srv_lock);
1510         if (server->tcpStatus == CifsExiting) {
1511                 spin_unlock(&server->srv_lock);
1512                 return -ENOENT;
1513         }
1514         spin_unlock(&server->srv_lock);
1515
1516         /*
1517          * Ensure that we do not send more than 50 overlapping requests
1518          * to the same server. We may make this configurable later or
              * use ses->maxReq.
              */
1519
1520         if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1521                 cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
1522                               len);
1523                 return -EIO;
1524         }
1525
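             /*
              * CIFS_BLOCKING_OP: locking commands are allowed to block on
              * the server, so they are not counted against the in-flight
              * request total.
              */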
1526         rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
1527         if (rc)
1528                 return rc;
1529
1530         /*
1531          * Make sure that we sign in the same order that we send on this
1532          * socket and avoid races inside tcp sendmsg code that could cause
              * corruption of smb data.
              */
1533
1534         cifs_server_lock(server);
1535
1536         rc = allocate_mid(ses, in_buf, &midQ);
1537         if (rc) {
1538                 cifs_server_unlock(server);
1539                 return rc;
1540         }
1541
1542         rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1543         if (rc) {
1544                 delete_mid(midQ);
1545                 cifs_server_unlock(server);
1546                 return rc;
1547         }
1548
1549         midQ->mid_state = MID_REQUEST_SUBMITTED;
1550         rc = smb_send(server, in_buf, len);
1551         cifs_save_when_sent(midQ);
1552
1553         if (rc < 0)
1554                 server->sequence_number -= 2;
1555
1556         cifs_server_unlock(server);
1557
1558         if (rc < 0) {
1559                 delete_mid(midQ);
1560                 return rc;
1561         }
1562
1563         /*
              * Wait for a reply, allowing signals to interrupt the wait:
              * a blocking lock may legitimately be held for a long time.
              */
1564         rc = wait_event_interruptible(server->response_q,
1565                 (!(midQ->mid_state == MID_REQUEST_SUBMITTED ||
1566                    midQ->mid_state == MID_RESPONSE_RECEIVED)) ||
1567                 ((server->tcpStatus != CifsGood) &&
1568                  (server->tcpStatus != CifsNew)));
1569
1570         /* Were we interrupted by a signal? */
1571         spin_lock(&server->srv_lock);
1572         if ((rc == -ERESTARTSYS) &&
1573                 (midQ->mid_state == MID_REQUEST_SUBMITTED ||
1574                  midQ->mid_state == MID_RESPONSE_RECEIVED) &&
1575                 ((server->tcpStatus == CifsGood) ||
1576                  (server->tcpStatus == CifsNew))) {
1577                 spin_unlock(&server->srv_lock);
1578
1579                 if (in_buf->Command == SMB_COM_TRANSACTION2) {
1580                         /*
1581                          * POSIX lock. We send an NT_CANCEL SMB to cause
                             * the blocking lock to return.
                             */
1582                         rc = send_cancel(server, &rqst, midQ);
1583                         if (rc) {
1584                                 delete_mid(midQ);
1585                                 return rc;
1586                         }
1587                 } else {
1588                         /*
1589                          * Windows lock. We send a LOCKING_ANDX_CANCEL_LOCK
                             * to cause the blocking lock to return.
                             */
1590
1591                         rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
1592
1593                         /*
1594                          * If we get -ENOLCK back the lock may have
                             * already been removed. Don't exit in this case.
                             */
1595                         if (rc && rc != -ENOLCK) {
1596                                 delete_mid(midQ);
1597                                 return rc;
1598                         }
1599                 }
1600
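                     /* now wait again for the original lock request to complete */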
1601                 rc = wait_for_response(server, midQ);
1602                 if (rc) {
1603                         send_cancel(server, &rqst, midQ);
1604                         spin_lock(&server->mid_lock);
1605                         if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
1606                             midQ->mid_state == MID_RESPONSE_RECEIVED) {
1607                                 /* no longer considered to be "in-flight" */
1608                                 midQ->callback = release_mid;
1609                                 spin_unlock(&server->mid_lock);
1610                                 return rc;
1611                         }
1612                         spin_unlock(&server->mid_lock);
1613                 }
1614
1615                 /* We got the response - restart system call. */
1616                 rstart = 1;
1617                 spin_lock(&server->srv_lock);
1618         }
1619         spin_unlock(&server->srv_lock);
1620
1621         rc = cifs_sync_mid_result(midQ, server);
1622         if (rc != 0)
1623                 return rc;
1624
1625         /* received frame is ok */
1626         if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_READY) {
1627                 rc = -EIO;
1628                 cifs_tcon_dbg(VFS, "Bad MID state?\n");
1629                 goto out;
1630         }
1631
1632         *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1633         memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1634         rc = cifs_check_receive(midQ, server, 0);
1635 out:
1636         delete_mid(midQ);
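             /*
              * If we cancelled the lock and the server then failed it with
              * -EACCES, restart the system call so the blocking lock is
              * retried.
              */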
1637         if (rstart && rc == -EACCES)
1638                 return -ERESTARTSYS;
1639         return rc;
1640 }
1641
1642 /*
1643  * Discard any remaining data in the current SMB: read it from the socket
1644  * in bounded chunks and throw it away until the frame is consumed.
1645  */
1646 int
1647 cifs_discard_remaining_data(struct TCP_Server_Info *server)
1648 {
1649         unsigned int rfclen = server->pdu_size;
1650         int remaining = rfclen + HEADER_PREAMBLE_SIZE(server) -
1651                 server->total_read;
1652
1653         while (remaining > 0) {
1654                 int length;
1655
1656                 length = cifs_discard_from_socket(server,
1657                                 min_t(size_t, remaining,
1658                                       CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
1659                 if (length < 0)
1660                         return length;
1661                 server->total_read += length;
1662                 remaining -= length;
1663         }
1664
1665         return 0;
1666 }
1667
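     /*
      * Discard what is left of the frame, then complete the mid and hand it
      * the small response buffer so the caller can still examine the header.
      */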
1668 static int
1669 __cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
1670                      bool malformed)
1671 {
1672         int length;
1673
1674         length = cifs_discard_remaining_data(server);
1675         dequeue_mid(mid, malformed);
1676         mid->resp_buf = server->smallbuf;
1677         server->smallbuf = NULL;
1678         return length;
1679 }
1680
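     /* a read that has already failed is treated as a malformed frame */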
1681 static int
1682 cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1683 {
1684         struct cifs_readdata *rdata = mid->callback_data;
1685
1686         return __cifs_readv_discard(server, mid, rdata->result);
1687 }
1688
1689 int
1690 cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1691 {
1692         int length, len;
1693         unsigned int data_offset, data_len;
1694         struct cifs_readdata *rdata = mid->callback_data;
1695         char *buf = server->smallbuf;
1696         unsigned int buflen = server->pdu_size + HEADER_PREAMBLE_SIZE(server);
1697         bool use_rdma_mr = false;
1698
1699         cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%u\n",
1700                  __func__, mid->mid, rdata->offset, rdata->bytes);
1701
1702         /*
1703          * read the rest of READ_RSP header (sans Data array), or whatever we
1704          * can if there's not enough data. At this point, we've read down to
1705          * the Mid.
1706          */
1707         len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
1708                                                         HEADER_SIZE(server) + 1;
1709
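             /* buf already holds the first HEADER_SIZE(server) - 1 bytes */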
1710         length = cifs_read_from_socket(server,
1711                                        buf + HEADER_SIZE(server) - 1, len);
1712         if (length < 0)
1713                 return length;
1714         server->total_read += length;
1715
1716         if (server->ops->is_session_expired &&
1717             server->ops->is_session_expired(buf)) {
1718                 cifs_reconnect(server, true);
1719                 return -1;
1720         }
1721
1722         if (server->ops->is_status_pending &&
1723             server->ops->is_status_pending(buf, server)) {
1724                 cifs_discard_remaining_data(server);
1725                 return -1;
1726         }
1727
1728         /* set up first two iov for signature check and to get credits */
1729         rdata->iov[0].iov_base = buf;
1730         rdata->iov[0].iov_len = HEADER_PREAMBLE_SIZE(server);
1731         rdata->iov[1].iov_base = buf + HEADER_PREAMBLE_SIZE(server);
1732         rdata->iov[1].iov_len =
1733                 server->total_read - HEADER_PREAMBLE_SIZE(server);
1734         cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
1735                  rdata->iov[0].iov_base, rdata->iov[0].iov_len);
1736         cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
1737                  rdata->iov[1].iov_base, rdata->iov[1].iov_len);
1738
1739         /* Was the SMB read successful? */
1740         rdata->result = server->ops->map_error(buf, false);
1741         if (rdata->result != 0) {
1742                 cifs_dbg(FYI, "%s: server returned error %d\n",
1743                          __func__, rdata->result);
1744                 /* normal error on read response */
1745                 return __cifs_readv_discard(server, mid, false);
1746         }
1747
1748         /* Is there enough to get to the rest of the READ_RSP header? */
1749         if (server->total_read < server->vals->read_rsp_size) {
1750                 cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n",
1751                          __func__, server->total_read,
1752                          server->vals->read_rsp_size);
1753                 rdata->result = -EIO;
1754                 return cifs_readv_discard(server, mid);
1755         }
1756
1757         data_offset = server->ops->read_data_offset(buf) +
1758                 HEADER_PREAMBLE_SIZE(server);
1759         if (data_offset < server->total_read) {
1760                 /*
1761                  * win2k8 sometimes sends an offset of 0 when the read
1762                  * is beyond the EOF. Treat it as if the data starts just after
1763                  * the header.
1764                  */
1765                 cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
1766                          __func__, data_offset);
1767                 data_offset = server->total_read;
1768         } else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
1769                 /* data_offset is beyond the end of smallbuf */
1770                 cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
1771                          __func__, data_offset);
1772                 rdata->result = -EIO;
1773                 return cifs_readv_discard(server, mid);
1774         }
1775
1776         cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n",
1777                  __func__, server->total_read, data_offset);
1778
1779         len = data_offset - server->total_read;
1780         if (len > 0) {
1781                 /* read any junk before data into the rest of smallbuf */
1782                 length = cifs_read_from_socket(server,
1783                                                buf + server->total_read, len);
1784                 if (length < 0)
1785                         return length;
1786                 server->total_read += length;
1787         }
1788
1789         /* how much data is in the response? */
1790 #ifdef CONFIG_CIFS_SMB_DIRECT
1791         use_rdma_mr = rdata->mr;
1792 #endif
1793         data_len = server->ops->read_data_length(buf, use_rdma_mr);
1794         if (!use_rdma_mr && (data_offset + data_len > buflen)) {
1795                 /* data_len is corrupt -- discard frame */
1796                 rdata->result = -EIO;
1797                 return cifs_readv_discard(server, mid);
1798         }
1799
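             /* hand the payload directly to the caller-supplied pages */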
1800         length = rdata->read_into_pages(server, rdata, data_len);
1801         if (length < 0)
1802                 return length;
1803
1804         server->total_read += length;
1805
1806         cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n",
1807                  server->total_read, buflen, data_len);
1808
1809         /* discard anything left over */
1810         if (server->total_read < buflen)
1811                 return cifs_readv_discard(server, mid);
1812
1813         dequeue_mid(mid, false);
1814         mid->resp_buf = server->smallbuf;
1815         server->smallbuf = NULL;
1816         return length;
1817 }