// SPDX-License-Identifier: LGPL-2.1
/*
 *   fs/smb/client/transport.c
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <linux/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include <linux/task_io_accounting_ops.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

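/*
 * Default mid callback: mark the response as ready and wake the task
 * that issued the request (stored in mid->callback_data).
 */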
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
        if (mid->mid_state == MID_RESPONSE_RECEIVED)
                mid->mid_state = MID_RESPONSE_READY;
        wake_up_process(mid->callback_data);
}

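/*
 * Allocate a mid_q_entry from the mempool and initialize it for the
 * given request. The entry starts with one kref reference; callers
 * drop it with release_mid().
 */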
static struct mid_q_entry *
alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
        struct mid_q_entry *temp;

        if (server == NULL) {
                cifs_dbg(VFS, "%s: null TCP session\n", __func__);
                return NULL;
        }

        temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
        memset(temp, 0, sizeof(struct mid_q_entry));
        kref_init(&temp->refcount);
        temp->mid = get_mid(smb_buffer);
        temp->pid = current->pid;
        temp->command = cpu_to_le16(smb_buffer->Command);
        cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
        /*
         * Easier to use jiffies. Note that a mid can be allocated some
         * time before it is actually sent.
         */
        temp->when_alloc = jiffies;
        temp->server = server;

        /*
         * The default is for the mid to be synchronous, so the
         * default callback just wakes up the current task.
         */
        get_task_struct(current);
        temp->creator = current;
        temp->callback = cifs_wake_up_task;
        temp->callback_data = current;

        atomic_inc(&mid_count);
        temp->mid_state = MID_REQUEST_ALLOCATED;
        return temp;
}

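/*
 * Final kref release for a mid: run the cancelled-mid handler if the
 * waiter gave up before the response arrived, update the per-command
 * latency statistics (CONFIG_CIFS_STATS2), free the response buffer
 * and return the entry to the mempool.
 */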
void __release_mid(struct kref *refcount)
{
        struct mid_q_entry *midEntry =
                        container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
        __le16 command = midEntry->server->vals->lock_cmd;
        __u16 smb_cmd = le16_to_cpu(midEntry->command);
        unsigned long now;
        unsigned long roundtrip_time;
#endif
        struct TCP_Server_Info *server = midEntry->server;

        if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
            (midEntry->mid_state == MID_RESPONSE_RECEIVED ||
             midEntry->mid_state == MID_RESPONSE_READY) &&
            server->ops->handle_cancelled_mid)
                server->ops->handle_cancelled_mid(midEntry, server);

        midEntry->mid_state = MID_FREE;
        atomic_dec(&mid_count);
        if (midEntry->large_buf)
                cifs_buf_release(midEntry->resp_buf);
        else
                cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
        now = jiffies;
        if (now < midEntry->when_alloc)
                cifs_server_dbg(VFS, "Invalid mid allocation time\n");
        roundtrip_time = now - midEntry->when_alloc;

        if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
                if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
                        server->slowest_cmd[smb_cmd] = roundtrip_time;
                        server->fastest_cmd[smb_cmd] = roundtrip_time;
                } else {
                        if (server->slowest_cmd[smb_cmd] < roundtrip_time)
                                server->slowest_cmd[smb_cmd] = roundtrip_time;
                        else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
                                server->fastest_cmd[smb_cmd] = roundtrip_time;
                }
                cifs_stats_inc(&server->num_cmds[smb_cmd]);
                server->time_per_cmd[smb_cmd] += roundtrip_time;
        }
        /*
         * Commands taking longer than one second (default) can be an
         * indication that something is wrong, unless it is quite a slow
         * link or a very busy server. Note that this calculation is
         * unlikely or impossible to wrap as long as slow_rsp_threshold
         * is not set far above the recommended maximum (32767, i.e. 9
         * hours), and is generally harmless even if wrong since it only
         * affects debug counters - so leave the calculation as a simple
         * comparison rather than doing multiple conversions and
         * overflow checks.
         */
        if ((slow_rsp_threshold != 0) &&
            time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
            (midEntry->command != command)) {
                /*
                 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
                 * NB: le16_to_cpu returns unsigned so can not be negative below
                 */
                if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
                        cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

                trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
                               midEntry->when_sent, midEntry->when_received);
                if (cifsFYI & CIFS_TIMER) {
                        pr_debug("slow rsp: cmd %d mid %llu",
                                 midEntry->command, midEntry->mid);
                        cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
                                  now - midEntry->when_alloc,
                                  now - midEntry->when_sent,
                                  now - midEntry->when_received);
                }
        }
#endif
        put_task_struct(midEntry->creator);

        mempool_free(midEntry, cifs_mid_poolp);
}

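/*
 * Unlink a mid from the pending queue (if it is still there) and drop
 * the queue's reference to it.
 */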
void
delete_mid(struct mid_q_entry *mid)
{
        spin_lock(&mid->server->mid_lock);
        if (!(mid->mid_flags & MID_DELETED)) {
                list_del_init(&mid->qhead);
                mid->mid_flags |= MID_DELETED;
        }
        spin_unlock(&mid->server->mid_lock);

        release_mid(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:     Server to send the data to
 * @smb_msg:    Message to send
 * @sent:       amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
              size_t *sent)
{
        int rc = 0;
        int retries = 0;
        struct socket *ssocket = server->ssocket;

        *sent = 0;

        if (server->noblocksnd)
                smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
        else
                smb_msg->msg_flags = MSG_NOSIGNAL;

        while (msg_data_left(smb_msg)) {
                /*
                 * If blocking send, we try 3 times, since each can block
                 * for 5 seconds. For nonblocking sends we have to try
                 * more times, waiting increasing amounts of time to allow
                 * the socket to clear. The overall time we wait in either
                 * case to send on the socket is about 15 seconds.
                 * Similarly we wait up to 15 seconds in SendReceive[2]
                 * for the server to send a response back for most types
                 * of requests (except SMB Write past end of file which
                 * can be slow, and blocking lock operations). NFS waits
                 * slightly longer than CIFS, but this can make it take
                 * longer for nonresponsive servers to be detected and 15
                 * seconds is more than enough time for modern networks to
                 * send a packet. In most cases if we fail to send after
                 * the retries we will kill the socket and reconnect which
                 * may clear the network problem.
                 */
                rc = sock_sendmsg(ssocket, smb_msg);
                if (rc == -EAGAIN) {
                        retries++;
                        if (retries >= 14 ||
                            (!server->noblocksnd && (retries > 2))) {
                                cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
                                         ssocket);
                                return -EAGAIN;
                        }
                        msleep(1 << retries);
                        continue;
                }

                if (rc < 0)
                        return rc;

                if (rc == 0) {
                        /*
                         * Should never happen; letting the socket clear
                         * before retrying is our only obvious option here.
                         */
                        cifs_server_dbg(VFS, "tcp sent no data\n");
                        msleep(500);
                        continue;
                }

                /* send was at least partially successful */
                *sent += rc;
                retries = 0; /* in case we get ENOSPC on the next send */
        }
        return 0;
}

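/*
 * smb_rqst_len - total number of bytes @rqst will put on the wire.
 * For SMB2+ the leading iov holds only the 4-byte RFC1002 length
 * field and is skipped; the remaining kvec lengths plus any data in
 * rq_iter are summed.
 */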
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        unsigned int i;
        struct kvec *iov;
        int nvec;
        unsigned long buflen = 0;

        if (!is_smb1(server) && rqst->rq_nvec >= 2 &&
            rqst->rq_iov[0].iov_len == 4) {
                iov = &rqst->rq_iov[1];
                nvec = rqst->rq_nvec - 1;
        } else {
                iov = rqst->rq_iov;
                nvec = rqst->rq_nvec;
        }

        /* total up iov array first */
        for (i = 0; i < nvec; i++)
                buflen += iov[i].iov_len;

        buflen += iov_iter_count(&rqst->rq_iter);
        return buflen;
}

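/*
 * __smb_send_rqst - send one or more requests on the transport.
 * Uses smbdirect when RDMA is enabled; otherwise blocks all signals,
 * corks the socket, sends the RFC1002 length marker (SMB2+) followed
 * by each request's kvecs and data iterator, and signals for a
 * reconnect on a partial send so the server discards the fragment.
 */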
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
                struct smb_rqst *rqst)
{
        int rc;
        struct kvec *iov;
        int n_vec;
        unsigned int send_length = 0;
        unsigned int i, j;
        sigset_t mask, oldmask;
        size_t total_len = 0, sent, size;
        struct socket *ssocket = server->ssocket;
        struct msghdr smb_msg = {};
        __be32 rfc1002_marker;

        cifs_in_send_inc(server);
        if (cifs_rdma_enabled(server)) {
                /* return -EAGAIN when connecting or reconnecting */
                rc = -EAGAIN;
                if (server->smbd_conn)
                        rc = smbd_send(server, num_rqst, rqst);
                goto smbd_done;
        }

        rc = -EAGAIN;
        if (ssocket == NULL)
                goto out;

        rc = -ERESTARTSYS;
        if (fatal_signal_pending(current)) {
                cifs_dbg(FYI, "signal pending before send request\n");
                goto out;
        }

        rc = 0;
        /* cork the socket */
        tcp_sock_set_cork(ssocket->sk, true);

        for (j = 0; j < num_rqst; j++)
                send_length += smb_rqst_len(server, &rqst[j]);
        rfc1002_marker = cpu_to_be32(send_length);

        /*
         * We should not allow signals to interrupt the network send because
         * any partial send will cause session reconnects, thus increasing
         * latency of system calls and overloading a server with unnecessary
         * requests.
         */

        sigfillset(&mask);
        sigprocmask(SIG_BLOCK, &mask, &oldmask);

        /* Generate a rfc1002 marker for SMB2+ */
        if (!is_smb1(server)) {
                struct kvec hiov = {
                        .iov_base = &rfc1002_marker,
                        .iov_len  = 4
                };
                iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, &hiov, 1, 4);
                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;
                send_length += 4;
        }

        cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

        for (j = 0; j < num_rqst; j++) {
                iov = rqst[j].rq_iov;
                n_vec = rqst[j].rq_nvec;

                size = 0;
                for (i = 0; i < n_vec; i++) {
                        dump_smb(iov[i].iov_base, iov[i].iov_len);
                        size += iov[i].iov_len;
                }

                iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, iov, n_vec, size);

                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;

                if (iov_iter_count(&rqst[j].rq_iter) > 0) {
                        smb_msg.msg_iter = rqst[j].rq_iter;
                        rc = smb_send_kvec(server, &smb_msg, &sent);
                        if (rc < 0)
                                break;
                        total_len += sent;
                }
        }

unmask:
        sigprocmask(SIG_SETMASK, &oldmask, NULL);

        /*
         * If a signal is pending but we have already sent the whole packet to
         * the server we need to return success status to allow a corresponding
         * mid entry to be kept in the pending requests queue, thus allowing
         * the client to handle responses from the server.
         *
         * If only part of the packet has been sent there is no need to hide
         * the interrupt because the session will be reconnected anyway, so
         * there won't be any response from the server to handle.
         */

        if (signal_pending(current) && (total_len != send_length)) {
                cifs_dbg(FYI, "signal is pending after attempt to send\n");
                rc = -ERESTARTSYS;
        }

        /* uncork it */
        tcp_sock_set_cork(ssocket->sk, false);

        if ((total_len > 0) && (total_len != send_length)) {
                cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
                         send_length, total_len);
                /*
                 * If we have only sent part of an SMB then the next SMB could
                 * be taken as the remainder of this one. We need to kill the
                 * socket so the server throws away the partial SMB.
                 */
                cifs_signal_cifsd_for_reconnect(server, false);
                trace_smb3_partial_send_reconnect(server->CurrentMid,
                                                  server->conn_id, server->hostname);
        }
smbd_done:
        if (rc < 0 && rc != -EINTR)
                cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
                         rc);
        else if (rc > 0)
                rc = 0;
out:
        cifs_in_send_dec(server);
        return rc;
}

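/*
 * Scratch state used by smb_send_rqst() when a request has to be
 * transformed (encrypted): the transform header, the enlarged request
 * array and the kvec carrying that header. Allocated with kzalloc()
 * rather than kept on the stack.
 */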
struct send_req_vars {
        struct smb2_transform_hdr tr_hdr;
        struct smb_rqst rqst[MAX_COMPOUND];
        struct kvec iov;
};

static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
              struct smb_rqst *rqst, int flags)
{
        struct send_req_vars *vars;
        struct smb_rqst *cur_rqst;
        struct kvec *iov;
        int rc;

        if (!(flags & CIFS_TRANSFORM_REQ))
                return __smb_send_rqst(server, num_rqst, rqst);

        if (num_rqst > MAX_COMPOUND - 1)
                return -ENOMEM;

        if (!server->ops->init_transform_rq) {
                cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
                return -EIO;
        }

        vars = kzalloc(sizeof(*vars), GFP_NOFS);
        if (!vars)
                return -ENOMEM;
        cur_rqst = vars->rqst;
        iov = &vars->iov;

        iov->iov_base = &vars->tr_hdr;
        iov->iov_len = sizeof(vars->tr_hdr);
        cur_rqst[0].rq_iov = iov;
        cur_rqst[0].rq_nvec = 1;

        rc = server->ops->init_transform_rq(server, num_rqst + 1,
                                            &cur_rqst[0], rqst);
        if (rc)
                goto out;

        rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
        smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
out:
        kfree(vars);
        return rc;
}

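/*
 * smb_send - send a single request whose buffer already begins with
 * the 4-byte RFC1002 length field; the field is split into its own
 * kvec for the send path.
 */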
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
         unsigned int smb_buf_length)
{
        struct kvec iov[2];
        struct smb_rqst rqst = { .rq_iov = iov,
                                 .rq_nvec = 2 };

        iov[0].iov_base = smb_buffer;
        iov[0].iov_len = 4;
        iov[1].iov_base = (char *)smb_buffer + 4;
        iov[1].iov_len = smb_buf_length;

        return __smb_send_rqst(server, 1, &rqst);
}

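/*
 * wait_for_free_credits - throttle requests to the server. Waits
 * (killable, up to @timeout ms, or forever if @timeout is negative)
 * until @num_credits credits are available for this operation type,
 * then charges them and bumps the in-flight count. CIFS_NON_BLOCKING
 * callers are never held up, and the last MAX_COMPOUND credits are
 * reserved for compound requests once enough requests are in flight.
 */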
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
                      const int timeout, const int flags,
                      unsigned int *instance)
{
        long rc;
        int *credits;
        int optype;
        long int t;
        int scredits, in_flight;

        if (timeout < 0)
                t = MAX_JIFFY_OFFSET;
        else
                t = msecs_to_jiffies(timeout);

        optype = flags & CIFS_OP_MASK;

        *instance = 0;

        credits = server->ops->get_credits_field(server, optype);
        /* Since an echo is already inflight, no need to wait to send another */
        if (*credits <= 0 && optype == CIFS_ECHO_OP)
                return -EAGAIN;

        spin_lock(&server->req_lock);
        if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
                /* oplock breaks must not be held up */
                server->in_flight++;
                if (server->in_flight > server->max_in_flight)
                        server->max_in_flight = server->in_flight;
                *credits -= 1;
                *instance = server->reconnect_instance;
                scredits = *credits;
                in_flight = server->in_flight;
                spin_unlock(&server->req_lock);

                trace_smb3_nblk_credits(server->CurrentMid,
                                server->conn_id, server->hostname, scredits, -1, in_flight);
                cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
                                __func__, 1, scredits);

                return 0;
        }

        while (1) {
                spin_unlock(&server->req_lock);

                spin_lock(&server->srv_lock);
                if (server->tcpStatus == CifsExiting) {
                        spin_unlock(&server->srv_lock);
                        return -ENOENT;
                }
                spin_unlock(&server->srv_lock);

                spin_lock(&server->req_lock);
                if (*credits < num_credits) {
                        scredits = *credits;
                        spin_unlock(&server->req_lock);

                        cifs_num_waiters_inc(server);
                        rc = wait_event_killable_timeout(server->request_q,
                                has_credits(server, credits, num_credits), t);
                        cifs_num_waiters_dec(server);
                        if (!rc) {
                                spin_lock(&server->req_lock);
                                scredits = *credits;
                                in_flight = server->in_flight;
                                spin_unlock(&server->req_lock);

                                trace_smb3_credit_timeout(server->CurrentMid,
                                                server->conn_id, server->hostname, scredits,
                                                num_credits, in_flight);
                                cifs_server_dbg(VFS, "wait timed out after %d ms\n",
                                                timeout);
                                return -EBUSY;
                        }
                        if (rc == -ERESTARTSYS)
                                return -ERESTARTSYS;
                        spin_lock(&server->req_lock);
                } else {
                        /*
                         * For normal commands, reserve the last MAX_COMPOUND
                         * credits for compound requests.
                         * Otherwise these compounds could be permanently
                         * starved for credits by single-credit requests.
                         *
                         * To prevent spinning CPU, block this thread until
                         * there are >MAX_COMPOUND credits available.
                         * But only do this if we already have a lot of
                         * credits in flight to avoid triggering this check
                         * for servers that are slow to hand out credits on
                         * new sessions.
                         */
                        if (!optype && num_credits == 1 &&
                            server->in_flight > 2 * MAX_COMPOUND &&
                            *credits <= MAX_COMPOUND) {
                                spin_unlock(&server->req_lock);

                                cifs_num_waiters_inc(server);
                                rc = wait_event_killable_timeout(
                                        server->request_q,
                                        has_credits(server, credits,
                                                    MAX_COMPOUND + 1),
                                        t);
                                cifs_num_waiters_dec(server);
                                if (!rc) {
                                        spin_lock(&server->req_lock);
                                        scredits = *credits;
                                        in_flight = server->in_flight;
                                        spin_unlock(&server->req_lock);

                                        trace_smb3_credit_timeout(
                                                        server->CurrentMid,
                                                        server->conn_id, server->hostname,
                                                        scredits, num_credits, in_flight);
                                        cifs_server_dbg(VFS, "wait timed out after %d ms\n",
                                                        timeout);
                                        return -EBUSY;
                                }
                                if (rc == -ERESTARTSYS)
                                        return -ERESTARTSYS;
                                spin_lock(&server->req_lock);
                                continue;
                        }

                        /*
                         * Can not count locking commands against the total
                         * as they are allowed to block on the server.
                         */

                        /* update # of requests on the wire to server */
                        if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
                                *credits -= num_credits;
                                server->in_flight += num_credits;
                                if (server->in_flight > server->max_in_flight)
                                        server->max_in_flight = server->in_flight;
                                *instance = server->reconnect_instance;
                        }
                        scredits = *credits;
                        in_flight = server->in_flight;
                        spin_unlock(&server->req_lock);

                        trace_smb3_waitff_credits(server->CurrentMid,
                                        server->conn_id, server->hostname, scredits,
                                        -(num_credits), in_flight);
                        cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
                                        __func__, num_credits, scredits);
                        break;
                }
        }
        return 0;
}

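/* Wait with no timeout for a single credit of the given operation type. */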
static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
                      unsigned int *instance)
{
        return wait_for_free_credits(server, 1, -1, flags,
                                     instance);
}

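/*
 * wait_for_compound_request - reserve @num credits for a compound.
 * Fails fast with -EDEADLK when not enough credits are available and
 * nothing is in flight (so no new credits can be expected); otherwise
 * waits up to 60 seconds for the credits to arrive.
 */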
static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
                          const int flags, unsigned int *instance)
{
        int *credits;
        int scredits, in_flight;

        credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

        spin_lock(&server->req_lock);
        scredits = *credits;
        in_flight = server->in_flight;

        if (*credits < num) {
                /*
                 * If the server is tight on resources or just gives us less
                 * credits for other reasons (e.g. requests are coming out of
                 * order and the server delays granting more credits until it
                 * processes a missing mid) and we exhausted most available
                 * credits there may be situations when we try to send
                 * a compound request but we don't have enough credits. At this
                 * point the client needs to decide if it should wait for
                 * additional credits or fail the request. If at least one
                 * request is in flight there is a high probability that the
                 * server will return enough credits to satisfy this compound
                 * request.
                 *
                 * Return immediately if no requests in flight since we will be
                 * stuck on waiting for credits.
                 */
                if (server->in_flight == 0) {
                        spin_unlock(&server->req_lock);
                        trace_smb3_insufficient_credits(server->CurrentMid,
                                        server->conn_id, server->hostname, scredits,
                                        num, in_flight);
                        cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
                                        __func__, in_flight, num, scredits);
                        return -EDEADLK;
                }
        }
        spin_unlock(&server->req_lock);

        return wait_for_free_credits(server, num, 60000, flags,
                                     instance);
}

int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
                      unsigned int *num, struct cifs_credits *credits)
{
        *num = size;
        credits->value = 0;
        credits->instance = server->reconnect_instance;
        return 0;
}

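/*
 * allocate_mid - session-state-aware mid allocation: reject most
 * commands while the session is still being set up or is exiting,
 * then queue the new mid on the server's pending_mid_q.
 */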
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
                        struct mid_q_entry **ppmidQ)
{
        spin_lock(&ses->ses_lock);
        if (ses->ses_status == SES_NEW) {
                if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
                        (in_buf->Command != SMB_COM_NEGOTIATE)) {
                        spin_unlock(&ses->ses_lock);
                        return -EAGAIN;
                }
                /* else ok - we are setting up session */
        }

        if (ses->ses_status == SES_EXITING) {
                /* check if SMB session is bad because we are setting it up */
                if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
                        spin_unlock(&ses->ses_lock);
                        return -EAGAIN;
                }
                /* else ok - we are shutting down session */
        }
        spin_unlock(&ses->ses_lock);

        *ppmidQ = alloc_mid(in_buf, ses->server);
        if (*ppmidQ == NULL)
                return -ENOMEM;
        spin_lock(&ses->server->mid_lock);
        list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
        spin_unlock(&ses->server->mid_lock);
        return 0;
}

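/*
 * Sleep (killable and freezable) until the demultiplex thread moves
 * the mid out of the submitted/received states, i.e. the response is
 * ready or the request failed.
 */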
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
        int error;

        error = wait_event_state(server->response_q,
                                 midQ->mid_state != MID_REQUEST_SUBMITTED &&
                                 midQ->mid_state != MID_RESPONSE_RECEIVED,
                                 (TASK_KILLABLE|TASK_FREEZABLE_UNSAFE));
        if (error < 0)
                return -ERESTARTSYS;

        return 0;
}

struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        /* enable signing if server requires it */
        if (server->sign)
                hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

        mid = alloc_mid(hdr, server);
        if (mid == NULL)
                return ERR_PTR(-ENOMEM);

        rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
        if (rc) {
                release_mid(mid);
                return ERR_PTR(rc);
        }

        return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
                mid_receive_t *receive, mid_callback_t *callback,
                mid_handle_t *handle, void *cbdata, const int flags,
                const struct cifs_credits *exist_credits)
{
        int rc;
        struct mid_q_entry *mid;
        struct cifs_credits credits = { .value = 0, .instance = 0 };
        unsigned int instance;
        int optype;

        optype = flags & CIFS_OP_MASK;

        if ((flags & CIFS_HAS_CREDITS) == 0) {
                rc = wait_for_free_request(server, flags, &instance);
                if (rc)
                        return rc;
                credits.value = 1;
                credits.instance = instance;
        } else
                instance = exist_credits->instance;

        cifs_server_lock(server);

        /*
         * We can't use credits obtained from the previous session to send this
         * request. Check if there were reconnects after we obtained credits and
         * return -EAGAIN in such cases to let callers handle it.
         */
        if (instance != server->reconnect_instance) {
                cifs_server_unlock(server);
                add_credits_and_wake_if(server, &credits, optype);
                return -EAGAIN;
        }

        mid = server->ops->setup_async_request(server, rqst);
        if (IS_ERR(mid)) {
                cifs_server_unlock(server);
                add_credits_and_wake_if(server, &credits, optype);
                return PTR_ERR(mid);
        }

        mid->receive = receive;
        mid->callback = callback;
        mid->callback_data = cbdata;
        mid->handle = handle;
        mid->mid_state = MID_REQUEST_SUBMITTED;

        /* put it on the pending_mid_q */
        spin_lock(&server->mid_lock);
        list_add_tail(&mid->qhead, &server->pending_mid_q);
        spin_unlock(&server->mid_lock);

        /*
         * Need to store the time in mid before calling I/O. For call_async,
         * I/O response may come back and free the mid entry on another thread.
         */
        cifs_save_when_sent(mid);
        rc = smb_send_rqst(server, 1, rqst, flags);

        if (rc < 0) {
                revert_current_mid(server, mid->credits);
                server->sequence_number -= 2;
                delete_mid(mid);
        }

        cifs_server_unlock(server);

        if (rc == 0)
                return 0;

        add_credits_and_wake_if(server, &credits, optype);
        return rc;
}

/*
 * Send an SMB request. No response info (other than the return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer, how long to wait, and
 * whether to log the NT STATUS code (error) before mapping it to a
 * POSIX error.
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
                 char *in_buf, int flags)
{
        int rc;
        struct kvec iov[1];
        struct kvec rsp_iov;
        int resp_buf_type;

        iov[0].iov_base = in_buf;
        iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
        flags |= CIFS_NO_RSP_BUF;
        rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
        cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

        return rc;
}

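/*
 * cifs_sync_mid_result - collect the final status of a mid once the
 * caller has finished waiting: map the mid state to an errno, unlink
 * the mid from the pending queue if needed and drop the reference.
 */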
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
        int rc = 0;

        cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
                 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

        spin_lock(&server->mid_lock);
        switch (mid->mid_state) {
        case MID_RESPONSE_READY:
                spin_unlock(&server->mid_lock);
                return rc;
        case MID_RETRY_NEEDED:
                rc = -EAGAIN;
                break;
        case MID_RESPONSE_MALFORMED:
                rc = -EIO;
                break;
        case MID_SHUTDOWN:
                rc = -EHOSTDOWN;
                break;
        default:
                if (!(mid->mid_flags & MID_DELETED)) {
                        list_del_init(&mid->qhead);
                        mid->mid_flags |= MID_DELETED;
                }
                cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
                         __func__, mid->mid, mid->mid_state);
                rc = -EIO;
        }
        spin_unlock(&server->mid_lock);

        release_mid(mid);
        return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
            struct mid_q_entry *mid)
{
        return server->ops->send_cancel ?
                                server->ops->send_cancel(server, rqst, mid) : 0;
}

int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
                   bool log_error)
{
        /* convert the length into a more usable form */
        unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

        dump_smb(mid->resp_buf, min_t(u32, 92, len));

        if (server->sign) {
                struct kvec iov[2];
                int rc = 0;
                struct smb_rqst rqst = { .rq_iov = iov,
                                         .rq_nvec = 2 };

                iov[0].iov_base = mid->resp_buf;
                iov[0].iov_len = 4;
                iov[1].iov_base = (char *)mid->resp_buf + 4;
                iov[1].iov_len = len - 4;
                /* FIXME: add code to kill session */
                rc = cifs_verify_signature(&rqst, server,
                                           mid->sequence_number);
                if (rc)
                        cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
                                 rc);
        }

        /* BB special case reconnect tid and uid here? */
        return map_and_check_smb_error(mid, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
                   struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        rc = allocate_mid(ses, hdr, &mid);
        if (rc)
                return ERR_PTR(rc);
        rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
        if (rc) {
                delete_mid(mid);
                return ERR_PTR(rc);
        }
        return mid;
}

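/*
 * Per-mid callback for compound requests: immediately return the
 * credits granted by this response to the server structure and mark
 * the response as ready. Only the last mid in the chain also wakes
 * the waiting thread (cifs_compound_last_callback below).
 */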
static void
cifs_compound_callback(struct mid_q_entry *mid)
{
        struct TCP_Server_Info *server = mid->server;
        struct cifs_credits credits;

        credits.value = server->ops->get_credits(mid);
        credits.instance = server->reconnect_instance;

        add_credits(server, &credits, mid->optype);

        if (mid->mid_state == MID_RESPONSE_RECEIVED)
                mid->mid_state = MID_RESPONSE_READY;
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        release_mid(mid);
}

/*
 * Return a channel (master if none) of @ses that can be used to send
 * regular requests.
 *
 * If we are currently binding a new channel (negprot/sess.setup),
 * return the new incomplete channel.
 */
struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
{
        uint index = 0;
        unsigned int min_in_flight = UINT_MAX, max_in_flight = 0;
        struct TCP_Server_Info *server = NULL;
        int i;

        if (!ses)
                return NULL;

        spin_lock(&ses->chan_lock);
        for (i = 0; i < ses->chan_count; i++) {
                server = ses->chans[i].server;
                if (!server || server->terminate)
                        continue;

                /*
                 * strictly speaking, we should pick up req_lock to read
                 * server->in_flight. But it shouldn't matter much here if we
                 * race while reading this data. The worst that can happen is
                 * that we could use a channel that's not least loaded. Avoiding
                 * taking the lock could help reduce wait time, which is
                 * important for this function
                 */
                if (server->in_flight < min_in_flight) {
                        min_in_flight = server->in_flight;
                        index = i;
                }
                if (server->in_flight > max_in_flight)
                        max_in_flight = server->in_flight;
        }

        /* if all channels are equally loaded, fall back to round-robin */
        if (min_in_flight == max_in_flight) {
                index = (uint)atomic_inc_return(&ses->chan_seq);
                index %= ses->chan_count;
        }
        spin_unlock(&ses->chan_lock);

        return ses->chans[index].server;
}

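/*
 * compound_send_recv - send @num_rqst requests as one compound chain
 * and wait for all of the responses: reserves credits, sets up mids
 * under srv_mutex (so signing matches the send order), sends, then
 * collects each response into @resp_iov/@resp_buf_type.
 */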
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
                   struct TCP_Server_Info *server,
                   const int flags, const int num_rqst, struct smb_rqst *rqst,
                   int *resp_buf_type, struct kvec *resp_iov)
{
        int i, j, optype, rc = 0;
        struct mid_q_entry *midQ[MAX_COMPOUND];
        bool cancelled_mid[MAX_COMPOUND] = {false};
        struct cifs_credits credits[MAX_COMPOUND] = {
                { .value = 0, .instance = 0 }
        };
        unsigned int instance;
        char *buf;

        optype = flags & CIFS_OP_MASK;

        for (i = 0; i < num_rqst; i++)
                resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

        if (!ses || !ses->server || !server) {
                cifs_dbg(VFS, "Null session\n");
                return -EIO;
        }

        spin_lock(&server->srv_lock);
        if (server->tcpStatus == CifsExiting) {
                spin_unlock(&server->srv_lock);
                return -ENOENT;
        }
        spin_unlock(&server->srv_lock);

        /*
         * Wait for all the requests to become available.
         * This approach still leaves the possibility to be stuck waiting for
         * credits if the server doesn't grant credits to the outstanding
         * requests and if the client is completely idle, not generating any
         * other requests.
         * This can be handled by the eventual session reconnect.
         */
        rc = wait_for_compound_request(server, num_rqst, flags,
                                       &instance);
        if (rc)
                return rc;

        for (i = 0; i < num_rqst; i++) {
                credits[i].value = 1;
                credits[i].instance = instance;
        }

        /*
         * Make sure that we sign in the same order that we send on this socket
         * and avoid races inside tcp sendmsg code that could cause corruption
         * of smb data.
         */

        cifs_server_lock(server);

        /*
         * All the parts of the compound chain must use credits obtained
         * from the same session. We can not use credits obtained from the
         * previous session to send this request. Check if there were
         * reconnects after we obtained credits and return -EAGAIN in such
         * cases to let callers handle it.
         */
        if (instance != server->reconnect_instance) {
                cifs_server_unlock(server);
                for (j = 0; j < num_rqst; j++)
                        add_credits(server, &credits[j], optype);
                return -EAGAIN;
        }

        for (i = 0; i < num_rqst; i++) {
                midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
                if (IS_ERR(midQ[i])) {
                        revert_current_mid(server, i);
                        for (j = 0; j < i; j++)
                                delete_mid(midQ[j]);
                        cifs_server_unlock(server);

                        /* Update # of requests on wire to server */
                        for (j = 0; j < num_rqst; j++)
                                add_credits(server, &credits[j], optype);
                        return PTR_ERR(midQ[i]);
                }

                midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
                midQ[i]->optype = optype;
                /*
                 * Invoke callback for every part of the compound chain
                 * to calculate credits properly. Wake up this thread only when
                 * the last element is received.
                 */
                if (i < num_rqst - 1)
                        midQ[i]->callback = cifs_compound_callback;
                else
                        midQ[i]->callback = cifs_compound_last_callback;
        }
        rc = smb_send_rqst(server, num_rqst, rqst, flags);

        for (i = 0; i < num_rqst; i++)
                cifs_save_when_sent(midQ[i]);

        if (rc < 0) {
                revert_current_mid(server, num_rqst);
                server->sequence_number -= 2;
        }

        cifs_server_unlock(server);

        /*
         * If sending failed for some reason or it is an oplock break that we
         * will not receive a response to - return credits back
         */
        if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
                for (i = 0; i < num_rqst; i++)
                        add_credits(server, &credits[i], optype);
                goto out;
        }

        /*
         * At this point the request is passed to the network stack - we assume
         * that any credits taken from the server structure on the client have
         * been spent and we can't return them back. Once we receive responses
         * we will collect credits granted by the server in the mid callbacks
         * and add those credits to the server structure.
         */

        /*
         * Compounding is never used during session establish.
         */
        spin_lock(&ses->ses_lock);
        if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
                spin_unlock(&ses->ses_lock);

                cifs_server_lock(server);
                smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
                cifs_server_unlock(server);

                spin_lock(&ses->ses_lock);
        }
        spin_unlock(&ses->ses_lock);

        for (i = 0; i < num_rqst; i++) {
                rc = wait_for_response(server, midQ[i]);
                if (rc != 0)
                        break;
        }
        if (rc != 0) {
                for (; i < num_rqst; i++) {
                        cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
                                 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
                        send_cancel(server, &rqst[i], midQ[i]);
                        spin_lock(&server->mid_lock);
                        midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
                        if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED ||
                            midQ[i]->mid_state == MID_RESPONSE_RECEIVED) {
                                midQ[i]->callback = cifs_cancelled_callback;
                                cancelled_mid[i] = true;
                                credits[i].value = 0;
                        }
                        spin_unlock(&server->mid_lock);
                }
        }

        for (i = 0; i < num_rqst; i++) {
                if (rc < 0)
                        goto out;

                rc = cifs_sync_mid_result(midQ[i], server);
                if (rc != 0) {
                        /* mark this mid as cancelled to not free it below */
                        cancelled_mid[i] = true;
                        goto out;
                }

                if (!midQ[i]->resp_buf ||
                    midQ[i]->mid_state != MID_RESPONSE_READY) {
                        rc = -EIO;
                        cifs_dbg(FYI, "Bad MID state?\n");
                        goto out;
                }

                buf = (char *)midQ[i]->resp_buf;
                resp_iov[i].iov_base = buf;
                resp_iov[i].iov_len = midQ[i]->resp_buf_size +
                        HEADER_PREAMBLE_SIZE(server);

                if (midQ[i]->large_buf)
                        resp_buf_type[i] = CIFS_LARGE_BUFFER;
                else
                        resp_buf_type[i] = CIFS_SMALL_BUFFER;

                rc = server->ops->check_receive(midQ[i], server,
                                                     flags & CIFS_LOG_ERROR);

                /* mark it so buf will not be freed by delete_mid */
                if ((flags & CIFS_NO_RSP_BUF) == 0)
                        midQ[i]->resp_buf = NULL;
        }

        /*
         * Compounding is never used during session establish.
         */
        spin_lock(&ses->ses_lock);
        if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
                struct kvec iov = {
                        .iov_base = resp_iov[0].iov_base,
                        .iov_len = resp_iov[0].iov_len
                };
                spin_unlock(&ses->ses_lock);
                cifs_server_lock(server);
                smb311_update_preauth_hash(ses, server, &iov, 1);
                cifs_server_unlock(server);
                spin_lock(&ses->ses_lock);
        }
        spin_unlock(&ses->ses_lock);

out:
        /*
         * This will dequeue all mids. After this it is important that the
         * demultiplex_thread will not process any of these mids any further.
         * This is prevented above by using a noop callback that will not
         * wake this thread except for the very last PDU.
         */
        for (i = 0; i < num_rqst; i++) {
                if (!cancelled_mid[i])
                        delete_mid(midQ[i]);
        }

        return rc;
}

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
               struct TCP_Server_Info *server,
               struct smb_rqst *rqst, int *resp_buf_type, const int flags,
               struct kvec *resp_iov)
{
        return compound_send_recv(xid, ses, server, flags, 1,
                                  rqst, resp_buf_type, resp_iov);
}

int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
             struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
             const int flags, struct kvec *resp_iov)
{
        struct smb_rqst rqst;
        struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
        int rc;

        if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
                new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
                                        GFP_KERNEL);
                if (!new_iov) {
                        /* otherwise cifs_send_recv below sets resp_buf_type */
                        *resp_buf_type = CIFS_NO_BUFFER;
                        return -ENOMEM;
                }
        } else
                new_iov = s_iov;

        /* 1st iov is a RFC1001 length followed by the rest of the packet */
        memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

        new_iov[0].iov_base = new_iov[1].iov_base;
        new_iov[0].iov_len = 4;
        new_iov[1].iov_base += 4;
        new_iov[1].iov_len -= 4;

        memset(&rqst, 0, sizeof(struct smb_rqst));
        rqst.rq_iov = new_iov;
        rqst.rq_nvec = n_vec + 1;

        rc = cifs_send_recv(xid, ses, ses->server,
                            &rqst, resp_buf_type, flags, resp_iov);
        if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
                kfree(new_iov);
        return rc;
}

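/*
 * SendReceive - synchronously send an SMB1 request and wait for the
 * response. *pbytes_returned is set to the RFC1002 length of the
 * response that was copied into @out_buf.
 */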
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
            struct smb_hdr *in_buf, struct smb_hdr *out_buf,
            int *pbytes_returned, const int flags)
{
        int rc = 0;
        struct mid_q_entry *midQ;
        unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
        struct kvec iov = { .iov_base = in_buf, .iov_len = len };
        struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
        struct cifs_credits credits = { .value = 1, .instance = 0 };
        struct TCP_Server_Info *server;

        if (ses == NULL) {
                cifs_dbg(VFS, "Null smb session\n");
                return -EIO;
        }
        server = ses->server;
        if (server == NULL) {
                cifs_dbg(VFS, "Null tcp session\n");
                return -EIO;
        }

        spin_lock(&server->srv_lock);
        if (server->tcpStatus == CifsExiting) {
                spin_unlock(&server->srv_lock);
                return -ENOENT;
        }
        spin_unlock(&server->srv_lock);

        if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
                cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
                                len);
                return -EIO;
        }

        /*
         * Ensure that we do not send more than 50 overlapping requests
         * to the same server. We may make this configurable later or
         * use ses->maxReq.
         */
        rc = wait_for_free_request(server, flags, &credits.instance);
        if (rc)
                return rc;

        /*
         * Make sure that we sign in the same order that we send on this
         * socket and avoid races inside tcp sendmsg code that could cause
         * corruption of smb data.
         */
        cifs_server_lock(server);

        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
                cifs_server_unlock(server);
                /* Update # of requests on wire to server */
                add_credits(server, &credits, 0);
                return rc;
        }

        rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
        if (rc) {
                cifs_server_unlock(server);
                goto out;
        }

        midQ->mid_state = MID_REQUEST_SUBMITTED;

        rc = smb_send(server, in_buf, len);
        cifs_save_when_sent(midQ);

        if (rc < 0)
                server->sequence_number -= 2;

        cifs_server_unlock(server);

        if (rc < 0)
                goto out;

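        /*
         * If the wait is interrupted, try to cancel the request on the
         * wire; a mid still in flight is handed to the demultiplex
         * thread (callback = release_mid) so that any late response is
         * reaped without this caller.
         */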
        rc = wait_for_response(server, midQ);
        if (rc != 0) {
                send_cancel(server, &rqst, midQ);
                spin_lock(&server->mid_lock);
                if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
                    midQ->mid_state == MID_RESPONSE_RECEIVED) {
                        /* no longer considered to be "in-flight" */
                        midQ->callback = release_mid;
                        spin_unlock(&server->mid_lock);
                        add_credits(server, &credits, 0);
                        return rc;
                }
                spin_unlock(&server->mid_lock);
        }

        rc = cifs_sync_mid_result(midQ, server);
        if (rc != 0) {
                add_credits(server, &credits, 0);
                return rc;
        }

        if (!midQ->resp_buf || !out_buf ||
            midQ->mid_state != MID_RESPONSE_READY) {
                rc = -EIO;
                cifs_server_dbg(VFS, "Bad MID state?\n");
                goto out;
        }

        *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
        memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
        rc = cifs_check_receive(midQ, server, 0);
out:
        delete_mid(midQ);
        add_credits(server, &credits, 0);

        return rc;
}

/*
 * We send a LOCKINGX_CANCEL_LOCK to cause the Windows
 * blocking lock to return.
 */
static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
                        struct smb_hdr *in_buf,
                        struct smb_hdr *out_buf)
{
        int bytes_returned;
        struct cifs_ses *ses = tcon->ses;
        LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

        /*
         * We just modify the current in_buf to change the type of lock
         * from LOCKING_ANDX_SHARED_LOCK or LOCKING_ANDX_EXCLUSIVE_LOCK
         * to LOCKING_ANDX_CANCEL_LOCK.
         */
        pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
        pSMB->Timeout = 0;
        pSMB->hdr.Mid = get_next_mid(ses->server);

        return SendReceive(xid, ses, in_buf, out_buf,
                        &bytes_returned, 0);
}

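/*
 * Variant of SendReceive() for blocking byte-range locks. The wait is
 * interruptible; on a signal we cancel the lock on the server (NT_CANCEL
 * for a Trans2/POSIX lock, LOCKINGX_CANCEL_LOCK otherwise), wait for the
 * original request to complete, and arrange for the system call to be
 * restarted if the lock was ultimately denied.
 */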
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
            struct smb_hdr *in_buf, struct smb_hdr *out_buf,
            int *pbytes_returned)
{
        int rc = 0;
        int rstart = 0;
        struct mid_q_entry *midQ;
        struct cifs_ses *ses;
        unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
        struct kvec iov = { .iov_base = in_buf, .iov_len = len };
        struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
        unsigned int instance;
        struct TCP_Server_Info *server;

        if (tcon == NULL || tcon->ses == NULL) {
                cifs_dbg(VFS, "Null smb session\n");
                return -EIO;
        }
        ses = tcon->ses;
        server = ses->server;

        if (server == NULL) {
                cifs_dbg(VFS, "Null tcp session\n");
                return -EIO;
        }

        spin_lock(&server->srv_lock);
        if (server->tcpStatus == CifsExiting) {
                spin_unlock(&server->srv_lock);
                return -ENOENT;
        }
        spin_unlock(&server->srv_lock);

        if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
                cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
                              len);
                return -EIO;
        }

        /*
         * Ensure that we do not send more than 50 overlapping requests
         * to the same server. We may make this configurable later or
         * use ses->maxReq.
         */
        rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
        if (rc)
                return rc;

        /*
         * Make sure that we sign in the same order that we send on this
         * socket and avoid races inside tcp sendmsg code that could cause
         * corruption of smb data.
         */
        cifs_server_lock(server);

        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
                cifs_server_unlock(server);
                return rc;
        }

        rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
        if (rc) {
                delete_mid(midQ);
                cifs_server_unlock(server);
                return rc;
        }

        midQ->mid_state = MID_REQUEST_SUBMITTED;
        rc = smb_send(server, in_buf, len);
        cifs_save_when_sent(midQ);

        if (rc < 0)
                server->sequence_number -= 2;

        cifs_server_unlock(server);

        if (rc < 0) {
                delete_mid(midQ);
                return rc;
        }

        /* Wait for a reply - allow signals to interrupt. */
        rc = wait_event_interruptible(server->response_q,
                (!(midQ->mid_state == MID_REQUEST_SUBMITTED ||
                   midQ->mid_state == MID_RESPONSE_RECEIVED)) ||
                ((server->tcpStatus != CifsGood) &&
                 (server->tcpStatus != CifsNew)));

        /* Were we interrupted by a signal? */
        spin_lock(&server->srv_lock);
        if ((rc == -ERESTARTSYS) &&
                (midQ->mid_state == MID_REQUEST_SUBMITTED ||
                 midQ->mid_state == MID_RESPONSE_RECEIVED) &&
                ((server->tcpStatus == CifsGood) ||
                 (server->tcpStatus == CifsNew))) {
                spin_unlock(&server->srv_lock);

                if (in_buf->Command == SMB_COM_TRANSACTION2) {
                        /*
                         * POSIX lock. We send a NT_CANCEL SMB to cause the
                         * blocking lock to return.
                         */
                        rc = send_cancel(server, &rqst, midQ);
                        if (rc) {
                                delete_mid(midQ);
                                return rc;
                        }
                } else {
                        /*
                         * Windows lock. We send a LOCKINGX_CANCEL_LOCK
                         * to cause the blocking lock to return.
                         */
                        rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

                        /*
                         * If we get -ENOLCK back the lock may have
                         * already been removed. Don't exit in this case.
                         */
                        if (rc && rc != -ENOLCK) {
                                delete_mid(midQ);
                                return rc;
                        }
                }

                rc = wait_for_response(server, midQ);
                if (rc) {
                        send_cancel(server, &rqst, midQ);
                        spin_lock(&server->mid_lock);
                        if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
                            midQ->mid_state == MID_RESPONSE_RECEIVED) {
                                /* no longer considered to be "in-flight" */
                                midQ->callback = release_mid;
                                spin_unlock(&server->mid_lock);
                                return rc;
                        }
                        spin_unlock(&server->mid_lock);
                }

                /* We got the response - restart system call. */
                rstart = 1;
                spin_lock(&server->srv_lock);
        }
        spin_unlock(&server->srv_lock);

        rc = cifs_sync_mid_result(midQ, server);
        if (rc != 0)
                return rc;

        /* rcvd frame is ok */
        if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_READY) {
                rc = -EIO;
                cifs_tcon_dbg(VFS, "Bad MID state?\n");
                goto out;
        }

        *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
        memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
        rc = cifs_check_receive(midQ, server, 0);
out:
        delete_mid(midQ);
        if (rstart && rc == -EACCES)
                return -ERESTARTSYS;
        return rc;
}

/*
 * Discard any remaining data in the current SMB: read it off the socket
 * in bounded chunks and drop it.
 */
int
cifs_discard_remaining_data(struct TCP_Server_Info *server)
{
        unsigned int rfclen = server->pdu_size;
        size_t remaining = rfclen + HEADER_PREAMBLE_SIZE(server) -
                server->total_read;

        while (remaining > 0) {
                ssize_t length;

                length = cifs_discard_from_socket(server,
                                min_t(size_t, remaining,
                                      CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
                if (length < 0)
                        return length;
                server->total_read += length;
                remaining -= length;
        }

        return 0;
}

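/*
 * Drain whatever is left of the frame off the socket, dequeue the mid,
 * and hand ownership of the small buffer (the response header) to it.
 */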
static int
__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
                     bool malformed)
{
        int length;

        length = cifs_discard_remaining_data(server);
        dequeue_mid(mid, malformed);
        mid->resp_buf = server->smallbuf;
        server->smallbuf = NULL;
        return length;
}

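/*
 * Discard path used once a read has already failed; a nonzero
 * rdata->result marks the mid as malformed when it is dequeued.
 */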
static int
cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
        struct cifs_readdata *rdata = mid->callback_data;

        return __cifs_readv_discard(server, mid, rdata->result);
}

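/*
 * Receive handler for READ responses. Pull the remainder of the response
 * header into the small buffer, validate it, then read the payload
 * straight into the caller's iterator (or account for data already
 * placed by an SMB Direct RDMA read), discarding anything malformed.
 */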
int
cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
        int length, len;
        unsigned int data_offset, data_len;
        struct cifs_readdata *rdata = mid->callback_data;
        char *buf = server->smallbuf;
        unsigned int buflen = server->pdu_size + HEADER_PREAMBLE_SIZE(server);
        bool use_rdma_mr = false;

        cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%u\n",
                 __func__, mid->mid, rdata->offset, rdata->bytes);

        /*
         * read the rest of READ_RSP header (sans Data array), or whatever we
         * can if there's not enough data. At this point, we've read down to
         * the Mid.
         */
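        /*
         * HEADER_SIZE(server) - 1 bytes (through the Mid) are already in
         * smallbuf, so append at that offset and cap the read at the
         * smaller of the full frame and the fixed READ_RSP header size.
         */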
        len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
                                                        HEADER_SIZE(server) + 1;

        length = cifs_read_from_socket(server,
                                       buf + HEADER_SIZE(server) - 1, len);
        if (length < 0)
                return length;
        server->total_read += length;

        if (server->ops->is_session_expired &&
            server->ops->is_session_expired(buf)) {
                cifs_reconnect(server, true);
                return -1;
        }

        if (server->ops->is_status_pending &&
            server->ops->is_status_pending(buf, server)) {
                cifs_discard_remaining_data(server);
                return -1;
        }

        /* set up first two iov for signature check and to get credits */
        rdata->iov[0].iov_base = buf;
        rdata->iov[0].iov_len = HEADER_PREAMBLE_SIZE(server);
        rdata->iov[1].iov_base = buf + HEADER_PREAMBLE_SIZE(server);
        rdata->iov[1].iov_len =
                server->total_read - HEADER_PREAMBLE_SIZE(server);
        cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
                 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
        cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
                 rdata->iov[1].iov_base, rdata->iov[1].iov_len);

        /* Was the SMB read successful? */
        rdata->result = server->ops->map_error(buf, false);
        if (rdata->result != 0) {
                cifs_dbg(FYI, "%s: server returned error %d\n",
                         __func__, rdata->result);
                /* normal error on read response */
                return __cifs_readv_discard(server, mid, false);
        }

        /* Is there enough to get to the rest of the READ_RSP header? */
        if (server->total_read < server->vals->read_rsp_size) {
                cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n",
                         __func__, server->total_read,
                         server->vals->read_rsp_size);
                rdata->result = -EIO;
                return cifs_readv_discard(server, mid);
        }

        data_offset = server->ops->read_data_offset(buf) +
                HEADER_PREAMBLE_SIZE(server);
        if (data_offset < server->total_read) {
                /*
                 * win2k8 sometimes sends an offset of 0 when the read
                 * is beyond the EOF. Treat it as if the data starts just after
                 * the header.
                 */
                cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
                         __func__, data_offset);
                data_offset = server->total_read;
        } else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
                /* data_offset is beyond the end of smallbuf */
                cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
                         __func__, data_offset);
                rdata->result = -EIO;
                return cifs_readv_discard(server, mid);
        }

        cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n",
                 __func__, server->total_read, data_offset);

        len = data_offset - server->total_read;
        if (len > 0) {
                /* read any junk before data into the rest of smallbuf */
                length = cifs_read_from_socket(server,
                                               buf + server->total_read, len);
                if (length < 0)
                        return length;
                server->total_read += length;
        }

        /* how much data is in the response? */
#ifdef CONFIG_CIFS_SMB_DIRECT
        use_rdma_mr = rdata->mr;
#endif
        data_len = server->ops->read_data_length(buf, use_rdma_mr);
        if (!use_rdma_mr && (data_offset + data_len > buflen)) {
                /* data_len is corrupt -- discard frame */
                rdata->result = -EIO;
                return cifs_readv_discard(server, mid);
        }

#ifdef CONFIG_CIFS_SMB_DIRECT
        if (rdata->mr)
                length = data_len; /* An RDMA read is already done. */
        else
#endif
                length = cifs_read_iter_from_socket(server, &rdata->iter,
                                                    data_len);
        if (length > 0)
                rdata->got_bytes += length;
        server->total_read += length;

        cifs_dbg(FYI, "total_read=%u buflen=%u data_len=%u\n",
                 server->total_read, buflen, data_len);

        /* discard anything left over */
        if (server->total_read < buflen)
                return cifs_readv_discard(server, mid);

        dequeue_mid(mid, false);
        mid->resp_buf = server->smallbuf;
        server->smallbuf = NULL;
        return length;
}